repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
darkoc/clowdflows | workflows/subgroup_discovery/SubgroupDiscovery/calcHull.py | 7 | 3323 | def calcRates(subgroups):
subgroups.TPR = []
subgroups.FPR = []
P = len(subgroups.targetClassRule.TP) * 1.0 # number of all positive examples as a float
N = len(subgroups.targetClassRule.FP) * 1.0 # number of all negative examples as a float
for rule in subgroups.rules:
subgroups.TPR.append( len(rule.TP) / P ) # true positive rate for this rule
subgroups.FPR.append( len(rule.FP) / N ) # false positive rate for this rule
# subgroups.TPR = [0.44, 0.34, 0.33, 0.49, 0.43, 0.49, 0.66, 0.60, 0.61, 0.78, 0.75, 0.77, 0.84, 0.82, 0.82]
# subgroups.FPR = [0.01, 0.00, 0.00, 0.02, 0.00, 0.02, 0.10, 0.07, 0.07, 0.21, 0.16, 0.19, 0.31, 0.29, 0.27]
# calculate convex hull; important: keep these 5 lines together
subgroups.hullTPR = [0]
subgroups.hullFPR = [0]
calcHull(subgroups, subgroups.TPR[:], subgroups.FPR[:] , A=(0,0), B=(1,1))
subgroups.hullTPR.append(1)
subgroups.hullFPR.append(1)
def calcRatesSubset(subgroups):
subgroups.TPR = []
subgroups.FPR = []
P = len(subgroups.targetClassRule.TP) * 1.0 # number of all positive examples as a float
N = len(subgroups.targetClassRule.FP) * 1.0 # number of all negative examples as a float
for rule in subgroups.rules:
TPr = len(rule.TP) / P
FPr = len(rule.FP) / N
subgroups.TPR.append( TPr ) # true positive rate for this rule
subgroups.FPR.append( FPr ) # false positive rate for this rule
#self.graphROC.tooltipData(FPr, TPr, rule)
def calcHull(subgroups, Y, X, A, B):
#initialization
C = (-1,-1) # best new point
y = -1 # best distance
index = -1
# calculate best new point
if (B[0]-A[0])==0:
#self.edtRules.appendPlainText("vertical line!!!")
pass
else:
k = (B[1]-A[1]) / (B[0]-A[0]) # coefficient of the line between A and B
for i in range(len(Y)): # check every point
yn = Y[i] -( k * ( X[i] - A[0] ) + A[1]) # vertical distance between point i and line AB
if yn>0 and yn > y: # if new distance is the greatest so far
C = (X[i], Y[i]) # the new point is the best so far
y = yn
index = i
# if new point on the hull was found
if C != (-1,-1):
# recursively call this function on the LEFT side of the point
del X[index]
del Y[index]
Xl =[]
Yl =[]
Xu =[]
Yu =[]
for i in range(len(X)):
if X[i]>=A[0] and X[i]<=C[0] and Y[i]>A[1]:
Xl.append(X[i])
Yl.append(Y[i])
elif X[i]>=C[0] and X[i]<=B[0] and Y[i]>C[1]:
Xu.append(X[i])
Yu.append(Y[i])
calcHull(subgroups, Yl, Xl, A,C) # recursive call
# save the new point
subgroups.hullTPR.append(C[1])
subgroups.hullFPR.append(C[0])
# recursively call this function on the RIGHT side of the point
calcHull(subgroups, Yu, Xu, C,B) # recursive call
| gpl-3.0 |
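The `calcRates`/`calcHull` pair above builds the ROC convex hull recursively from per-rule TPR/FPR points. A minimal sketch of how they might be driven, using hand-built stand-ins for the SubgroupDiscovery objects (the `MockRule`/`MockSubgroups` classes and the coverage lists are invented for illustration; only the attribute names come from the file above):

```python
# Hedged sketch: drive calcRates/calcHull from the file above with mock objects.
class MockRule(object):
    def __init__(self, TP, FP):
        self.TP = TP  # covered positive examples
        self.FP = FP  # covered negative examples

class MockSubgroups(object):
    pass

sg = MockSubgroups()
sg.targetClassRule = MockRule(TP=list(range(4)), FP=list(range(5)))  # 4 positives, 5 negatives
sg.rules = [
    MockRule(TP=list(range(3)), FP=list(range(1))),  # TPR 0.75, FPR 0.20
    MockRule(TP=list(range(2)), FP=list(range(0))),  # TPR 0.50, FPR 0.00
]
calcRates(sg)  # fills sg.hullFPR / sg.hullTPR with the hull points
print(list(zip(sg.hullFPR, sg.hullTPR)))  # [(0, 0), (0.0, 0.5), (0.2, 0.75), (1, 1)]
```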
alsrgv/tensorflow | tensorflow/python/framework/composite_tensor.py | 1 | 6660 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor-like objects that are composed from tf.Tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class CompositeTensor(object):
"""Abstract base class for Tensor-like objects that are composed from Tensors.
Each `CompositeTensor` can be decomposed into a structured collection of
component `tf.Tensor`s, and reconstructed from those components.
The `tensorflow.python.util.nest` module has support for treating composite
tensors as structure, which makes it easy to flatten and reconstruct
composite tensors (or larger structures that contain composite tensors).
E.g.:
```python
ct = ... # Create a composite tensor.
flat_list_of_tensors = nest.flatten(ct, expand_composites=True)
transformed_list_of_tensors = ... # do something with the flat tensors.
result = nest.pack_sequence_as(ct, transformed_list_of_tensors,
expand_composites=True)
```
"""
@abc.abstractproperty
def _type_spec(self):
"""A `TypeSpec` describing the type of this value."""
raise NotImplementedError("%s._type_spec()" % type(self).__name__)
# Deprecated -- use self._type_spec._to_components(self) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def _to_components(self):
"""Decomposes this composite tensor into its component tensors.
Returns:
A nested structure of `tf.Tensor`s and `CompositeTensor`s that can be
used to reconstruct this composite tensor (along with metadata returned
by `_component_metadata`).
"""
return self._type_spec._to_components(self) # pylint: disable=protected-access
# Deprecated -- use self._type_spec instead.
# TODO(b/133606651) Remove all callers and then delete this method.
def _component_metadata(self):
"""Returns any non-tensor metadata needed to reconstruct a composite tensor.
Returns:
A nested structure of metadata that can be used to reconstruct this
composite tensor (along with the tensors returned by `_to_components`).
"""
return self._type_spec
# Deprecated -- use metadata._from_components(components) instead.
# TODO(b/133606651) Remove all callers and then delete this method.
@staticmethod
def _from_components(components, metadata):
"""Creates a composite tensor of type `cls` from components.
Args:
components: A nested structure whose values are `tf.Tensor`s or
`tf.CompositeTensor`s (as returned by `_to_components`).
metadata: A nested structure containing any additional metadata needed to
reconstruct the composite tensor (as returned by `_component_metadata`).
Returns:
A `CompositeTensor` of type `cls`.
"""
return metadata._from_components(components) # pylint: disable=protected-access
def _shape_invariant_to_type_spec(self, shape):
"""Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).
Args:
shape: A `tf.TensorShape` object. The shape invariant for this
`CompositeTensor`, or `None` if a default shape invariant should be
used (based on the value of this `CompositeTensor`).
Returns:
A nested structure whose values are `tf.TensorShape` objects, specifying
the shape invariants for the tensors that comprise this `CompositeTensor`.
"""
# New TypeSpec subclasses generally do not need to implement this --
# this method is used for backwards compatibility. Users of tf.while_loop
# can specify a type by passing in TypeSpec instead.
raise NotImplementedError("%s._shape_invariant_to_type_spec"
% type(self).__name__)
# TODO(b/133606651) Remove this property, since it's not clear what it should
# return if a CompositeTensor has a mix of graph and non-graph components.
# Update users to perform an appropriate check themselves.
@property
def _is_graph_tensor(self):
"""Returns True if this tensor's components belong to a TF graph."""
components = self._type_spec._to_components(self) # pylint: disable=protected-access
tensors = nest.flatten(components, expand_composites=True)
return any(hasattr(t, "graph") for t in tensors)
def _consumers(self):
"""Returns a list of `Operation`s that consume this `CompositeTensor`.
Returns:
A list of `Operation`s.
Raises:
RuntimeError: If this method is called while executing eagerly.
"""
consumers = nest.flatten([
component.consumers()
for component in self._to_components()
if getattr(component, "graph", None) is not None
])
return list(set(consumers))
pywrap_tensorflow.RegisterType("CompositeTensor", CompositeTensor)
def replace_composites_with_components(structure):
"""Recursively replaces CompositeTensors with their components.
Args:
structure: A `nest`-compatible structure, possibly containing composite
tensors.
Returns:
A copy of `structure`, where each composite tensor has been replaced by
its components. The result will contain no composite tensors.
Note that `nest.flatten(replace_composites_with_components(structure))`
returns the same value as `nest.flatten(structure)`.
"""
if isinstance(structure, CompositeTensor):
return replace_composites_with_components(structure._to_components()) # pylint: disable=protected-access
elif not nest.is_sequence(structure):
return structure
else:
return nest.map_structure(replace_composites_with_components, structure,
expand_composites=False)
# @TODO(edloper): Can we replace convert_to_tensor_or_xyz with just
# convert_to_tensor_or_composite? Alternatively, should composite tensors
# register a dispatch override for tf.convert_to_tensor?
| apache-2.0 |
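The class docstring above shows the flatten/repack pattern in the abstract. A concrete round trip, assuming a TF 2.x build where `tf.RaggedTensor` is a `CompositeTensor` (the ragged values are example data, and `tf.identity` stands in for a real per-component transform):

```python
# Hedged sketch of nest.flatten / nest.pack_sequence_as with expand_composites.
import tensorflow as tf
from tensorflow.python.util import nest

rt = tf.ragged.constant([[1, 2], [3]])            # a CompositeTensor
flat = nest.flatten(rt, expand_composites=True)   # its component tf.Tensors
flat = [tf.identity(t) for t in flat]             # stand-in for a real transform
rt2 = nest.pack_sequence_as(rt, flat, expand_composites=True)
print(rt2)                                        # <tf.RaggedTensor [[1, 2], [3]]>
```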
LuckDragon82/demo | boilerplate/external/babel/core.py | 67 | 26942 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Core locale representation and locale data access."""
import os
import pickle
from babel import localedata
__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
'parse_locale']
__docformat__ = 'restructuredtext en'
_global_data = None
def get_global(key):
"""Return the dictionary for the given key in the global data.
The global data is stored in the ``babel/global.dat`` file and contains
information independent of individual locales.
>>> get_global('zone_aliases')['UTC']
'Etc/GMT'
>>> get_global('zone_territories')['Europe/Berlin']
'DE'
:param key: the data key
:return: the dictionary found in the global data under the given key
:rtype: `dict`
:since: version 0.9
"""
global _global_data
if _global_data is None:
dirname = os.path.join(os.path.dirname(__file__))
filename = os.path.join(dirname, 'global.dat')
fileobj = open(filename, 'rb')
try:
_global_data = pickle.load(fileobj)
finally:
fileobj.close()
return _global_data.get(key, {})
LOCALE_ALIASES = {
'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
}
class UnknownLocaleError(Exception):
"""Exception thrown when a locale is requested for which no locale data
is available.
"""
def __init__(self, identifier):
"""Create the exception.
:param identifier: the identifier string of the unsupported locale
"""
Exception.__init__(self, 'unknown locale %r' % identifier)
self.identifier = identifier
class Locale(object):
"""Representation of a specific locale.
>>> locale = Locale('en', 'US')
>>> repr(locale)
'<Locale "en_US">'
>>> locale.display_name
u'English (United States)'
A `Locale` object can also be instantiated from a raw locale string:
>>> locale = Locale.parse('en-US', sep='-')
>>> repr(locale)
'<Locale "en_US">'
`Locale` objects provide access to a collection of locale data, such as
territory and language names, number and date format patterns, and more:
>>> locale.number_symbols['decimal']
u'.'
If a locale is requested for which no locale data is available, an
`UnknownLocaleError` is raised:
>>> Locale.parse('en_DE')
Traceback (most recent call last):
...
UnknownLocaleError: unknown locale 'en_DE'
:see: `IETF RFC 3066 <http://www.ietf.org/rfc/rfc3066.txt>`_
"""
def __init__(self, language, territory=None, script=None, variant=None):
"""Initialize the locale object from the given identifier components.
>>> locale = Locale('en', 'US')
>>> locale.language
'en'
>>> locale.territory
'US'
:param language: the language code
:param territory: the territory (country or region) code
:param script: the script code
:param variant: the variant code
:raise `UnknownLocaleError`: if no locale data is available for the
requested locale
"""
self.language = language
self.territory = territory
self.script = script
self.variant = variant
self.__data = None
identifier = str(self)
if not localedata.exists(identifier):
raise UnknownLocaleError(identifier)
def default(cls, category=None, aliases=LOCALE_ALIASES):
"""Return the system default locale for the specified category.
>>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
... os.environ[name] = ''
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> Locale.default('LC_MESSAGES')
<Locale "fr_FR">
:param category: one of the ``LC_XXX`` environment variable names
:param aliases: a dictionary of aliases for locale identifiers
:return: the value of the variable, or any of the fallbacks
(``LANGUAGE``, ``LC_ALL``, ``LC_CTYPE``, and ``LANG``)
:rtype: `Locale`
:see: `default_locale`
"""
return cls(default_locale(category, aliases=aliases))
default = classmethod(default)
def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
"""Find the best match between available and requested locale strings.
>>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
<Locale "de_DE">
>>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
<Locale "de">
>>> Locale.negotiate(['de_DE', 'de'], ['en_US'])
You can specify the character used in the locale identifiers to separate
the different components. This separator is applied to both lists. Also,
case is ignored in the comparison:
>>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
<Locale "de_DE">
:param preferred: the list of locale identifiers preferred by the user
:param available: the list of locale identifiers available
:param aliases: a dictionary of aliases for locale identifiers
:return: the `Locale` object for the best match, or `None` if no match
was found
:rtype: `Locale`
:see: `negotiate_locale`
"""
identifier = negotiate_locale(preferred, available, sep=sep,
aliases=aliases)
if identifier:
return Locale.parse(identifier, sep=sep)
negotiate = classmethod(negotiate)
def parse(cls, identifier, sep='_'):
"""Create a `Locale` instance for the given locale identifier.
>>> l = Locale.parse('de-DE', sep='-')
>>> l.display_name
u'Deutsch (Deutschland)'
If the `identifier` parameter is not a string, but actually a `Locale`
object, that object is returned:
>>> Locale.parse(l)
<Locale "de_DE">
:param identifier: the locale identifier string
:param sep: optional component separator
:return: a corresponding `Locale` instance
:rtype: `Locale`
:raise `ValueError`: if the string does not appear to be a valid locale
identifier
:raise `UnknownLocaleError`: if no locale data is available for the
requested locale
:see: `parse_locale`
"""
if isinstance(identifier, basestring):
return cls(*parse_locale(identifier, sep=sep))
return identifier
parse = classmethod(parse)
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<Locale "%s">' % str(self)
def __str__(self):
return '_'.join(filter(None, [self.language, self.script,
self.territory, self.variant]))
def _data(self):
if self.__data is None:
self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
return self.__data
_data = property(_data)
def get_display_name(self, locale=None):
"""Return the display name of the locale using the given locale.
The display name will include the language, territory, script, and
variant, if those are specified.
>>> Locale('zh', 'CN', script='Hans').get_display_name('en')
u'Chinese (Simplified Han, China)'
:param locale: the locale to use
:return: the display name
"""
if locale is None:
locale = self
locale = Locale.parse(locale)
retval = locale.languages.get(self.language)
if self.territory or self.script or self.variant:
details = []
if self.script:
details.append(locale.scripts.get(self.script))
if self.territory:
details.append(locale.territories.get(self.territory))
if self.variant:
details.append(locale.variants.get(self.variant))
details = filter(None, details)
if details:
retval += ' (%s)' % u', '.join(details)
return retval
display_name = property(get_display_name, doc="""\
The localized display name of the locale.
>>> Locale('en').display_name
u'English'
>>> Locale('en', 'US').display_name
u'English (United States)'
>>> Locale('sv').display_name
u'svenska'
:type: `unicode`
""")
def english_name(self):
return self.get_display_name(Locale('en'))
english_name = property(english_name, doc="""\
The English display name of the locale.
>>> Locale('de').english_name
u'German'
>>> Locale('de', 'DE').english_name
u'German (Germany)'
:type: `unicode`
""")
#{ General Locale Display Names
def languages(self):
return self._data['languages']
languages = property(languages, doc="""\
Mapping of language codes to translated language names.
>>> Locale('de', 'DE').languages['ja']
u'Japanisch'
:type: `dict`
:see: `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_
""")
def scripts(self):
return self._data['scripts']
scripts = property(scripts, doc="""\
Mapping of script codes to translated script names.
>>> Locale('en', 'US').scripts['Hira']
u'Hiragana'
:type: `dict`
:see: `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
""")
def territories(self):
return self._data['territories']
territories = property(territories, doc="""\
Mapping of territory codes to translated territory names.
>>> Locale('es', 'CO').territories['DE']
u'Alemania'
:type: `dict`
:see: `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
""")
def variants(self):
return self._data['variants']
variants = property(variants, doc="""\
Mapping of variant codes to translated variant names.
>>> Locale('de', 'DE').variants['1901']
u'Alte deutsche Rechtschreibung'
:type: `dict`
""")
#{ Number Formatting
def currencies(self):
return self._data['currency_names']
currencies = property(currencies, doc="""\
Mapping of currency codes to translated currency names.
>>> Locale('en').currencies['COP']
u'Colombian Peso'
>>> Locale('de', 'DE').currencies['COP']
u'Kolumbianischer Peso'
:type: `dict`
""")
def currency_symbols(self):
return self._data['currency_symbols']
currency_symbols = property(currency_symbols, doc="""\
Mapping of currency codes to symbols.
>>> Locale('en', 'US').currency_symbols['USD']
u'$'
>>> Locale('es', 'CO').currency_symbols['USD']
u'US$'
:type: `dict`
""")
def number_symbols(self):
return self._data['number_symbols']
number_symbols = property(number_symbols, doc="""\
Symbols used in number formatting.
>>> Locale('fr', 'FR').number_symbols['decimal']
u','
:type: `dict`
""")
def decimal_formats(self):
return self._data['decimal_formats']
decimal_formats = property(decimal_formats, doc="""\
Locale patterns for decimal number formatting.
>>> Locale('en', 'US').decimal_formats[None]
<NumberPattern u'#,##0.###'>
:type: `dict`
""")
def currency_formats(self):
return self._data['currency_formats']
currency_formats = property(currency_formats, doc=r"""\
Locale patterns for currency number formatting.
>>> print Locale('en', 'US').currency_formats[None]
<NumberPattern u'\xa4#,##0.00'>
:type: `dict`
""")
def percent_formats(self):
return self._data['percent_formats']
percent_formats = property(percent_formats, doc="""\
Locale patterns for percent number formatting.
>>> Locale('en', 'US').percent_formats[None]
<NumberPattern u'#,##0%'>
:type: `dict`
""")
def scientific_formats(self):
return self._data['scientific_formats']
scientific_formats = property(scientific_formats, doc="""\
Locale patterns for scientific number formatting.
>>> Locale('en', 'US').scientific_formats[None]
<NumberPattern u'#E0'>
:type: `dict`
""")
#{ Calendar Information and Date Formatting
def periods(self):
return self._data['periods']
periods = property(periods, doc="""\
Locale display names for day periods (AM/PM).
>>> Locale('en', 'US').periods['am']
u'AM'
:type: `dict`
""")
def days(self):
return self._data['days']
days = property(days, doc="""\
Locale display names for weekdays.
>>> Locale('de', 'DE').days['format']['wide'][3]
u'Donnerstag'
:type: `dict`
""")
def months(self):
return self._data['months']
months = property(months, doc="""\
Locale display names for months.
>>> Locale('de', 'DE').months['format']['wide'][10]
u'Oktober'
:type: `dict`
""")
def quarters(self):
return self._data['quarters']
quarters = property(quarters, doc="""\
Locale display names for quarters.
>>> Locale('de', 'DE').quarters['format']['wide'][1]
u'1. Quartal'
:type: `dict`
""")
def eras(self):
return self._data['eras']
eras = property(eras, doc="""\
Locale display names for eras.
>>> Locale('en', 'US').eras['wide'][1]
u'Anno Domini'
>>> Locale('en', 'US').eras['abbreviated'][0]
u'BC'
:type: `dict`
""")
def time_zones(self):
return self._data['time_zones']
time_zones = property(time_zones, doc="""\
Locale display names for time zones.
>>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
u'British Summer Time'
>>> Locale('en', 'US').time_zones['America/St_Johns']['city']
u"St. John's"
:type: `dict`
""")
def meta_zones(self):
return self._data['meta_zones']
meta_zones = property(meta_zones, doc="""\
Locale display names for meta time zones.
Meta time zones are basically groups of different Olson time zones that
have the same GMT offset and daylight saving time.
>>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
u'Central European Summer Time'
:type: `dict`
:since: version 0.9
""")
def zone_formats(self):
return self._data['zone_formats']
zone_formats = property(zone_formats, doc=r"""\
Patterns related to the formatting of time zones.
>>> Locale('en', 'US').zone_formats['fallback']
u'%(1)s (%(0)s)'
>>> Locale('pt', 'BR').zone_formats['region']
u'Hor\xe1rio %s'
:type: `dict`
:since: version 0.9
""")
def first_week_day(self):
return self._data['week_data']['first_day']
first_week_day = property(first_week_day, doc="""\
The first day of a week, with 0 being Monday.
>>> Locale('de', 'DE').first_week_day
0
>>> Locale('en', 'US').first_week_day
6
:type: `int`
""")
def weekend_start(self):
return self._data['week_data']['weekend_start']
weekend_start = property(weekend_start, doc="""\
The day the weekend starts, with 0 being Monday.
>>> Locale('de', 'DE').weekend_start
5
:type: `int`
""")
def weekend_end(self):
return self._data['week_data']['weekend_end']
weekend_end = property(weekend_end, doc="""\
The day the weekend ends, with 0 being Monday.
>>> Locale('de', 'DE').weekend_end
6
:type: `int`
""")
def min_week_days(self):
return self._data['week_data']['min_days']
min_week_days = property(min_week_days, doc="""\
The minimum number of days in a week so that the week is counted as the
first week of a year or month.
>>> Locale('de', 'DE').min_week_days
4
:type: `int`
""")
def date_formats(self):
return self._data['date_formats']
date_formats = property(date_formats, doc="""\
Locale patterns for date formatting.
>>> Locale('en', 'US').date_formats['short']
<DateTimePattern u'M/d/yy'>
>>> Locale('fr', 'FR').date_formats['long']
<DateTimePattern u'd MMMM yyyy'>
:type: `dict`
""")
def time_formats(self):
return self._data['time_formats']
time_formats = property(time_formats, doc="""\
Locale patterns for time formatting.
>>> Locale('en', 'US').time_formats['short']
<DateTimePattern u'h:mm a'>
>>> Locale('fr', 'FR').time_formats['long']
<DateTimePattern u'HH:mm:ss z'>
:type: `dict`
""")
def datetime_formats(self):
return self._data['datetime_formats']
datetime_formats = property(datetime_formats, doc="""\
Locale patterns for datetime formatting.
>>> Locale('en').datetime_formats[None]
u'{1} {0}'
>>> Locale('th').datetime_formats[None]
u'{1}, {0}'
:type: `dict`
""")
def default_locale(category=None, aliases=LOCALE_ALIASES):
"""Returns the system default locale for a given category, based on
environment variables.
>>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE']:
... os.environ[name] = ''
>>> os.environ['LANG'] = 'fr_FR.UTF-8'
>>> default_locale('LC_MESSAGES')
'fr_FR'
The "C" or "POSIX" pseudo-locales are treated as aliases for the
"en_US_POSIX" locale:
>>> os.environ['LC_MESSAGES'] = 'POSIX'
>>> default_locale('LC_MESSAGES')
'en_US_POSIX'
:param category: one of the ``LC_XXX`` environment variable names
:param aliases: a dictionary of aliases for locale identifiers
:return: the value of the variable, or any of the fallbacks (``LANGUAGE``,
``LC_ALL``, ``LC_CTYPE``, and ``LANG``)
:rtype: `str`
"""
varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')
for name in filter(None, varnames):
locale = os.getenv(name)
if locale:
if name == 'LANGUAGE' and ':' in locale:
# the LANGUAGE variable may contain a colon-separated list of
# language codes; we just pick the first language on the list
locale = locale.split(':')[0]
if locale in ('C', 'POSIX'):
locale = 'en_US_POSIX'
elif aliases and locale in aliases:
locale = aliases[locale]
try:
return '_'.join(filter(None, parse_locale(locale)))
except ValueError:
pass
def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES):
"""Find the best match between available and requested locale strings.
>>> negotiate_locale(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
'de_DE'
>>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de'])
'de'
Case is ignored by the algorithm; the result uses the case of the preferred
locale identifier:
>>> negotiate_locale(['de_DE', 'en_US'], ['de_de', 'de_at'])
'de_DE'
By default, some web browsers unfortunately do not include the territory
in the locale identifier for many locales, and some don't even allow the
user to easily add the territory. So while you may prefer using qualified
locale identifiers in your web-application, they would not normally match
the language-only locale sent by such browsers. To work around that, this
function uses a default mapping of commonly used language-only locale
identifiers to identifiers including the territory:
>>> negotiate_locale(['ja', 'en_US'], ['ja_JP', 'en_US'])
'ja_JP'
Some browsers even use an incorrect or outdated language code, such as "no"
for Norwegian, where the correct locale identifier would actually be "nb_NO"
(Bokmål) or "nn_NO" (Nynorsk). The aliases are intended to take care of
such cases, too:
>>> negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE'])
'nb_NO'
You can override this default mapping by passing a different `aliases`
dictionary to this function, or you can bypass the behavior altogether by
setting the `aliases` parameter to `None`.
:param preferred: the list of locale strings preferred by the user
:param available: the list of locale strings available
:param sep: character that separates the different parts of the locale
strings
:param aliases: a dictionary of aliases for locale identifiers
:return: the locale identifier for the best match, or `None` if no match
was found
:rtype: `str`
"""
available = [a.lower() for a in available if a]
for locale in preferred:
ll = locale.lower()
if ll in available:
return locale
if aliases:
alias = aliases.get(ll)
if alias:
alias = alias.replace('_', sep)
if alias.lower() in available:
return alias
parts = locale.split(sep)
if len(parts) > 1 and parts[0].lower() in available:
return parts[0]
return None
def parse_locale(identifier, sep='_'):
"""Parse a locale identifier into a tuple of the form::
``(language, territory, script, variant)``
>>> parse_locale('zh_CN')
('zh', 'CN', None, None)
>>> parse_locale('zh_Hans_CN')
('zh', 'CN', 'Hans', None)
The default component separator is "_", but a different separator can be
specified using the `sep` parameter:
>>> parse_locale('zh-CN', sep='-')
('zh', 'CN', None, None)
If the identifier cannot be parsed into a locale, a `ValueError` exception
is raised:
>>> parse_locale('not_a_LOCALE_String')
Traceback (most recent call last):
...
ValueError: 'not_a_LOCALE_String' is not a valid locale identifier
Encoding information and locale modifiers are removed from the identifier:
>>> parse_locale('it_IT@euro')
('it', 'IT', None, None)
>>> parse_locale('en_US.UTF-8')
('en', 'US', None, None)
>>> parse_locale('de_DE.iso885915@euro')
('de', 'DE', None, None)
:param identifier: the locale identifier string
:param sep: character that separates the different components of the locale
identifier
:return: the ``(language, territory, script, variant)`` tuple
:rtype: `tuple`
:raise `ValueError`: if the string does not appear to be a valid locale
identifier
:see: `IETF RFC 4646 <http://www.ietf.org/rfc/rfc4646.txt>`_
"""
if '.' in identifier:
# this is probably the charset/encoding, which we don't care about
identifier = identifier.split('.', 1)[0]
if '@' in identifier:
# this is a locale modifier such as @euro, which we don't care about
# either
identifier = identifier.split('@', 1)[0]
parts = identifier.split(sep)
lang = parts.pop(0).lower()
if not lang.isalpha():
raise ValueError('expected only letters, got %r' % lang)
script = territory = variant = None
if parts:
if len(parts[0]) == 4 and parts[0].isalpha():
script = parts.pop(0).title()
if parts:
if len(parts[0]) == 2 and parts[0].isalpha():
territory = parts.pop(0).upper()
elif len(parts[0]) == 3 and parts[0].isdigit():
territory = parts.pop(0)
if parts:
if len(parts[0]) == 4 and parts[0][0].isdigit() or \
len(parts[0]) >= 5 and parts[0][0].isalpha():
variant = parts.pop()
if parts:
raise ValueError('%r is not a valid locale identifier' % identifier)
return lang, territory, script, variant
| lgpl-3.0 |
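The public entry points of the module above are `Locale`, `default_locale`, `negotiate_locale`, and `parse_locale`. A short sketch of typical calls, taken directly from the docstrings above (it assumes the bundled locale data is installed alongside the package):

```python
# Hedged sketch: the calls and expected results mirror the doctests above.
from babel.core import Locale, negotiate_locale, parse_locale

print(parse_locale('de_DE.UTF-8'))                         # -> ('de', 'DE', None, None)
print(negotiate_locale(['no', 'sv'], ['nb_NO', 'sv_SE']))  # -> 'nb_NO' via LOCALE_ALIASES
locale = Locale.parse('en-US', sep='-')
print(locale.display_name)                                 # -> English (United States)
```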
Aerotenna/Firmware | integrationtests/python_src/px4_it/mavros/mavros_test_common.py | 4 | 17211 | #!/usr/bin/env python2
from __future__ import division
import unittest
import rospy
import math
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import Altitude, ExtendedState, HomePosition, State, \
WaypointList
from mavros_msgs.srv import CommandBool, ParamGet, SetMode, WaypointClear, \
WaypointPush
from pymavlink import mavutil
from sensor_msgs.msg import NavSatFix
class MavrosTestCommon(unittest.TestCase):
def __init__(self, *args):
super(MavrosTestCommon, self).__init__(*args)
def setUp(self):
self.altitude = Altitude()
self.extended_state = ExtendedState()
self.global_position = NavSatFix()
self.home_position = HomePosition()
self.local_position = PoseStamped()
self.mission_wp = WaypointList()
self.state = State()
self.mav_type = None
self.sub_topics_ready = {
key: False
for key in [
'alt', 'ext_state', 'global_pos', 'home_pos', 'local_pos',
'mission_wp', 'state'
]
}
# ROS services
service_timeout = 30
rospy.loginfo("waiting for ROS services")
try:
rospy.wait_for_service('mavros/param/get', service_timeout)
rospy.wait_for_service('mavros/cmd/arming', service_timeout)
rospy.wait_for_service('mavros/mission/push', service_timeout)
rospy.wait_for_service('mavros/mission/clear', service_timeout)
rospy.wait_for_service('mavros/set_mode', service_timeout)
rospy.loginfo("ROS services are up")
except rospy.ROSException:
self.fail("failed to connect to services")
self.get_param_srv = rospy.ServiceProxy('mavros/param/get', ParamGet)
self.set_arming_srv = rospy.ServiceProxy('mavros/cmd/arming',
CommandBool)
self.set_mode_srv = rospy.ServiceProxy('mavros/set_mode', SetMode)
self.wp_clear_srv = rospy.ServiceProxy('mavros/mission/clear',
WaypointClear)
self.wp_push_srv = rospy.ServiceProxy('mavros/mission/push',
WaypointPush)
# ROS subscribers
self.alt_sub = rospy.Subscriber('mavros/altitude', Altitude,
self.altitude_callback)
self.ext_state_sub = rospy.Subscriber('mavros/extended_state',
ExtendedState,
self.extended_state_callback)
self.global_pos_sub = rospy.Subscriber('mavros/global_position/global',
NavSatFix,
self.global_position_callback)
self.home_pos_sub = rospy.Subscriber('mavros/home_position/home',
HomePosition,
self.home_position_callback)
self.local_pos_sub = rospy.Subscriber('mavros/local_position/pose',
PoseStamped,
self.local_position_callback)
self.mission_wp_sub = rospy.Subscriber(
'mavros/mission/waypoints', WaypointList, self.mission_wp_callback)
self.state_sub = rospy.Subscriber('mavros/state', State,
self.state_callback)
def tearDown(self):
self.log_topic_vars()
#
# Callback functions
#
def altitude_callback(self, data):
self.altitude = data
# amsl has been observed to be nan while other fields are valid
if not self.sub_topics_ready['alt'] and not math.isnan(data.amsl):
self.sub_topics_ready['alt'] = True
def extended_state_callback(self, data):
if self.extended_state.vtol_state != data.vtol_state:
rospy.loginfo("VTOL state changed from {0} to {1}".format(
mavutil.mavlink.enums['MAV_VTOL_STATE']
[self.extended_state.vtol_state].name, mavutil.mavlink.enums[
'MAV_VTOL_STATE'][data.vtol_state].name))
if self.extended_state.landed_state != data.landed_state:
rospy.loginfo("landed state changed from {0} to {1}".format(
mavutil.mavlink.enums['MAV_LANDED_STATE']
[self.extended_state.landed_state].name, mavutil.mavlink.enums[
'MAV_LANDED_STATE'][data.landed_state].name))
self.extended_state = data
if not self.sub_topics_ready['ext_state']:
self.sub_topics_ready['ext_state'] = True
def global_position_callback(self, data):
self.global_position = data
if not self.sub_topics_ready['global_pos']:
self.sub_topics_ready['global_pos'] = True
def home_position_callback(self, data):
self.home_position = data
if not self.sub_topics_ready['home_pos']:
self.sub_topics_ready['home_pos'] = True
def local_position_callback(self, data):
self.local_position = data
if not self.sub_topics_ready['local_pos']:
self.sub_topics_ready['local_pos'] = True
def mission_wp_callback(self, data):
if self.mission_wp.current_seq != data.current_seq:
rospy.loginfo("current mission waypoint sequence updated: {0}".
format(data.current_seq))
self.mission_wp = data
if not self.sub_topics_ready['mission_wp']:
self.sub_topics_ready['mission_wp'] = True
def state_callback(self, data):
if self.state.armed != data.armed:
rospy.loginfo("armed state changed from {0} to {1}".format(
self.state.armed, data.armed))
if self.state.connected != data.connected:
rospy.loginfo("connected changed from {0} to {1}".format(
self.state.connected, data.connected))
if self.state.mode != data.mode:
rospy.loginfo("mode changed from {0} to {1}".format(
self.state.mode, data.mode))
if self.state.system_status != data.system_status:
rospy.loginfo("system_status changed from {0} to {1}".format(
mavutil.mavlink.enums['MAV_STATE'][
self.state.system_status].name, mavutil.mavlink.enums[
'MAV_STATE'][data.system_status].name))
self.state = data
# mavros publishes a disconnected state message on init
if not self.sub_topics_ready['state'] and data.connected:
self.sub_topics_ready['state'] = True
#
# Helper methods
#
def set_arm(self, arm, timeout):
"""arm: True to arm or False to disarm, timeout(int): seconds"""
rospy.loginfo("setting FCU arm: {0}".format(arm))
old_arm = self.state.armed
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
arm_set = False
for i in xrange(timeout * loop_freq):
if self.state.armed == arm:
arm_set = True
rospy.loginfo("set arm success | seconds: {0} of {1}".format(
i / loop_freq, timeout))
break
else:
try:
res = self.set_arming_srv(arm)
if not res.success:
rospy.logerr("failed to send arm command")
except rospy.ServiceException as e:
rospy.logerr(e)
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(arm_set, (
"failed to set arm | new arm: {0}, old arm: {1} | timeout(seconds): {2}".
format(arm, old_arm, timeout)))
def set_mode(self, mode, timeout):
"""mode: PX4 mode string, timeout(int): seconds"""
rospy.loginfo("setting FCU mode: {0}".format(mode))
old_mode = self.state.mode
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
mode_set = False
for i in xrange(timeout * loop_freq):
if self.state.mode == mode:
mode_set = True
rospy.loginfo("set mode success | seconds: {0} of {1}".format(
i / loop_freq, timeout))
break
else:
try:
res = self.set_mode_srv(0, mode) # 0 is custom mode
if not res.mode_sent:
rospy.logerr("failed to send mode command")
except rospy.ServiceException as e:
rospy.logerr(e)
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(mode_set, (
"failed to set mode | new mode: {0}, old mode: {1} | timeout(seconds): {2}".
format(mode, old_mode, timeout)))
def wait_for_topics(self, timeout):
"""wait for simulation to be ready, make sure we're getting topic info
from all topics by checking dictionary of flag values set in callbacks,
timeout(int): seconds"""
rospy.loginfo("waiting for subscribed topics to be ready")
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
simulation_ready = False
for i in xrange(timeout * loop_freq):
if all(value for value in self.sub_topics_ready.values()):
simulation_ready = True
rospy.loginfo("simulation topics ready | seconds: {0} of {1}".
format(i / loop_freq, timeout))
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(simulation_ready, (
"failed to hear from all subscribed simulation topics | topic ready flags: {0} | timeout(seconds): {1}".
format(self.sub_topics_ready, timeout)))
def wait_for_landed_state(self, desired_landed_state, timeout, index):
rospy.loginfo("waiting for landed state | state: {0}, index: {1}".
format(mavutil.mavlink.enums['MAV_LANDED_STATE'][
desired_landed_state].name, index))
loop_freq = 10 # Hz
rate = rospy.Rate(loop_freq)
landed_state_confirmed = False
for i in xrange(timeout * loop_freq):
if self.extended_state.landed_state == desired_landed_state:
landed_state_confirmed = True
rospy.loginfo("landed state confirmed | seconds: {0} of {1}".
format(i / loop_freq, timeout))
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(landed_state_confirmed, (
"landed state not detected | desired: {0}, current: {1} | index: {2}, timeout(seconds): {3}".
format(mavutil.mavlink.enums['MAV_LANDED_STATE'][
desired_landed_state].name, mavutil.mavlink.enums[
'MAV_LANDED_STATE'][self.extended_state.landed_state].name,
index, timeout)))
def wait_for_vtol_state(self, transition, timeout, index):
"""Wait for VTOL transition, timeout(int): seconds"""
rospy.loginfo(
"waiting for VTOL transition | transition: {0}, index: {1}".format(
mavutil.mavlink.enums['MAV_VTOL_STATE'][
transition].name, index))
loop_freq = 10 # Hz
rate = rospy.Rate(loop_freq)
transitioned = False
for i in xrange(timeout * loop_freq):
if transition == self.extended_state.vtol_state:
rospy.loginfo("transitioned | seconds: {0} of {1}".format(
i / loop_freq, timeout))
transitioned = True
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(transitioned, (
"transition not detected | desired: {0}, current: {1} | index: {2} timeout(seconds): {3}".
format(mavutil.mavlink.enums['MAV_VTOL_STATE'][transition].name,
mavutil.mavlink.enums['MAV_VTOL_STATE'][
self.extended_state.vtol_state].name, index, timeout)))
def clear_wps(self, timeout):
"""timeout(int): seconds"""
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
wps_cleared = False
for i in xrange(timeout * loop_freq):
if not self.mission_wp.waypoints:
wps_cleared = True
rospy.loginfo("clear waypoints success | seconds: {0} of {1}".
format(i / loop_freq, timeout))
break
else:
try:
res = self.wp_clear_srv()
if not res.success:
rospy.logerr("failed to send waypoint clear command")
except rospy.ServiceException as e:
rospy.logerr(e)
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(wps_cleared, (
"failed to clear waypoints | timeout(seconds): {0}".format(timeout)
))
def send_wps(self, waypoints, timeout):
"""waypoints, timeout(int): seconds"""
rospy.loginfo("sending mission waypoints")
if self.mission_wp.waypoints:
rospy.loginfo("FCU already has mission waypoints")
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
wps_sent = False
wps_verified = False
for i in xrange(timeout * loop_freq):
if not wps_sent:
try:
res = self.wp_push_srv(start_index=0, waypoints=waypoints)
wps_sent = res.success
if wps_sent:
rospy.loginfo("waypoints successfully transferred")
except rospy.ServiceException as e:
rospy.logerr(e)
else:
if len(waypoints) == len(self.mission_wp.waypoints):
rospy.loginfo("number of waypoints transferred: {0}".
format(len(waypoints)))
wps_verified = True
if wps_sent and wps_verified:
rospy.loginfo("send waypoints success | seconds: {0} of {1}".
format(i / loop_freq, timeout))
break
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue((
wps_sent and wps_verified
), "mission could not be transferred and verified | timeout(seconds): {0}".
format(timeout))
def wait_for_mav_type(self, timeout):
"""Wait for MAV_TYPE parameter, timeout(int): seconds"""
rospy.loginfo("waiting for MAV_TYPE")
loop_freq = 1 # Hz
rate = rospy.Rate(loop_freq)
res = False
for i in xrange(timeout * loop_freq):
try:
res = self.get_param_srv('MAV_TYPE')
if res.success:
self.mav_type = res.value.integer
rospy.loginfo(
"MAV_TYPE received | type: {0} | seconds: {1} of {2}".
format(mavutil.mavlink.enums['MAV_TYPE'][self.mav_type]
.name, i / loop_freq, timeout))
break
except rospy.ServiceException as e:
rospy.logerr(e)
try:
rate.sleep()
except rospy.ROSException as e:
self.fail(e)
self.assertTrue(res.success, (
"MAV_TYPE param get failed | timeout(seconds): {0}".format(timeout)
))
def log_topic_vars(self):
"""log the state of topic variables"""
rospy.loginfo("========================")
rospy.loginfo("===== topic values =====")
rospy.loginfo("========================")
rospy.loginfo("altitude:\n{}".format(self.altitude))
rospy.loginfo("========================")
rospy.loginfo("extended_state:\n{}".format(self.extended_state))
rospy.loginfo("========================")
rospy.loginfo("global_position:\n{}".format(self.global_position))
rospy.loginfo("========================")
rospy.loginfo("home_position:\n{}".format(self.home_position))
rospy.loginfo("========================")
rospy.loginfo("local_position:\n{}".format(self.local_position))
rospy.loginfo("========================")
rospy.loginfo("mission_wp:\n{}".format(self.mission_wp))
rospy.loginfo("========================")
rospy.loginfo("state:\n{}".format(self.state))
rospy.loginfo("========================")
| bsd-3-clause |
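`MavrosTestCommon` is a base class meant to be subclassed by concrete integration tests. A hypothetical subclass using its helpers (the test name, mode string, and timeouts are invented; it assumes the class above is importable and a MAVROS-connected PX4 SITL instance is running):

```python
# Hedged sketch of a test case built on the helpers defined above.
from pymavlink import mavutil

class ExampleArmTest(MavrosTestCommon):
    def test_arm_and_set_mode(self):
        self.wait_for_topics(60)   # all subscribed topics must report data within 60 s
        self.wait_for_landed_state(
            mavutil.mavlink.MAV_LANDED_STATE_ON_GROUND, 10, -1)
        self.set_mode("OFFBOARD", 5)   # PX4 custom mode string, 5 s timeout
        self.set_arm(True, 5)          # arm the FCU within 5 s
```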
murfz/Sick-Beard | lib/guessit/fileutils.py | 41 | 2942 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import s, u
import os.path
import zipfile
import io
def split_path(path):
r"""Splits the given path into the list of folders and the filename (or the
last folder if you gave it a folder path).
If the given path was an absolute path, the first element will always be:
- the '/' root folder on Unix systems
- the drive letter on Windows systems (eg: r'C:\')
- the mount point '\\' on Windows systems (eg: r'\\host\share')
>>> s(split_path('/usr/bin/smewt'))
['/', 'usr', 'bin', 'smewt']
>>> s(split_path('relative_path/to/my_folder/'))
['relative_path', 'to', 'my_folder']
"""
result = []
while True:
head, tail = os.path.split(path)
# on Unix systems, the root folder is '/'
if head == '/' and tail == '':
return ['/'] + result
# on Windows, the root folder is a drive letter (eg: 'C:\') or for shares \\
if ((len(head) == 3 and head[1:] == ':\\') or (len(head) == 2 and head == '\\\\')) and tail == '':
return [head] + result
if head == '' and tail == '':
return result
# we just split a directory ending with '/', so tail is empty
if not tail:
path = head
continue
result = [tail] + result
path = head
def file_in_same_dir(ref_file, desired_file):
"""Return the path for a file in the same dir as a given reference file.
>>> s(file_in_same_dir('~/smewt/smewt.db', 'smewt.settings'))
'~/smewt/smewt.settings'
"""
return os.path.join(*(split_path(ref_file)[:-1] + [desired_file]))
def load_file_in_same_dir(ref_file, filename):
"""Load a given file. Works even when the file is contained inside a zip."""
path = split_path(ref_file)[:-1] + [filename]
for i, p in enumerate(path):
if p.endswith('.zip'):
zfilename = os.path.join(*path[:i + 1])
zfile = zipfile.ZipFile(zfilename)
return zfile.read('/'.join(path[i + 1:]))
return u(io.open(os.path.join(*path), encoding='utf-8').read())
| gpl-3.0 |
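The helpers above are pure path-manipulation functions, so they can be exercised directly; this sketch simply replays the docstring examples (on Python 2 the returned items are unicode strings, which is why the doctests wrap calls in `s()`):

```python
# Hedged sketch: same inputs and results as the doctests above.
from guessit.fileutils import split_path, file_in_same_dir

print(split_path('/usr/bin/smewt'))               # ['/', 'usr', 'bin', 'smewt']
print(split_path('relative_path/to/my_folder/'))  # ['relative_path', 'to', 'my_folder']
print(file_in_same_dir('~/smewt/smewt.db', 'smewt.settings'))  # ~/smewt/smewt.settings
```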
shaolinfry/litecoin | qa/rpc-tests/bipdersig-p2p.py | 49 | 6866 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript
from io import BytesIO
import time
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
'''
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
'''
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
'''
This test is meant to exercise BIP66 (DER SIG).
Connect to a single node.
Mine 2 (version 2) blocks (save the coinbases for later).
Generate 98 more version 2 blocks, verify the node accepts.
Mine 749 version 3 blocks, verify the node accepts.
Check that the new DERSIG rules are not enforced on the 750th version 3 block.
Check that the new DERSIG rules are enforced on the 751st version 3 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Mine 1 old version block, see that the node rejects.
'''
class BIP66Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(2)
height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
''' 98 more version 2 blocks '''
test_blocks = []
for i in range(98):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 3 blocks '''
test_blocks = []
for i in range(749):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
'''
Check that the new DERSIG rules are not enforced in the 750th
version 3 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Check that the new DERSIG rules are enforced in the 751st version 3
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in range(199):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP66Test().main()
| mit |
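The essential trick in `unDERify` above is splicing a zero padding byte in after the S-value, just before the trailing sighash-type byte, so the signature stays parseable by lax DER parsers but is no longer canonical under BIP66. A standalone byte-level sketch of that slice (the toy signature bytes are invented; a real one is roughly 71 bytes):

```python
# Hedged sketch of the padding applied by unDERify(), without the
# test_framework dependencies; the DER blob below is a toy placeholder.
sig_plus_hashtype = bytes.fromhex('3006020101020101') + b'\x01'  # DER sig + SIGHASH_ALL

# same slicing as unDERify: everything but the last byte, a zero padding
# byte, then the final hashtype byte
padded = sig_plus_hashtype[:-1] + b'\x00' + sig_plus_hashtype[-1:]
print(padded.hex())  # 30060201010201010001
```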
ryano144/intellij-community | python/helpers/epydoc/markup/doctest.py | 90 | 13069 | #
# doctest.py: Syntax Highlighting for doctest blocks
# Edward Loper
#
# Created [06/28/03 02:52 AM]
# $Id: restructuredtext.py 1210 2006-04-10 13:25:50Z edloper $
#
"""
Syntax highlighting for doctest blocks. This module defines two
functions, L{doctest_to_html()} and L{doctest_to_latex()}, which can
be used to perform syntax highlighting on doctest blocks. It also
defines the more general C{colorize_doctest()}, which could be used to
do syntax highlighting on doctest blocks with other output formats.
(Both C{doctest_to_html()} and C{doctest_to_latex()} are defined using
C{colorize_doctest()}.)
"""
__docformat__ = 'epytext en'
import re
from epydoc.util import plaintext_to_html, plaintext_to_latex
__all__ = ['doctest_to_html', 'doctest_to_latex',
'DoctestColorizer', 'XMLDoctestColorizer',
'HTMLDoctestColorizer', 'LaTeXDoctestColorizer']
def doctest_to_html(s):
"""
Perform syntax highlighting on the given doctest string, and
return the resulting HTML code. This code consists of a C{<pre>}
block with class=py-doctest. Syntax highlighting is performed
using the following css classes:
- C{py-prompt} -- the Python PS1 prompt (>>>)
- C{py-more} -- the Python PS2 prompt (...)
- C{py-keyword} -- a Python keyword (for, if, etc.)
- C{py-builtin} -- a Python builtin name (abs, dir, etc.)
- C{py-string} -- a string literal
- C{py-comment} -- a comment
- C{py-except} -- an exception traceback (up to the next >>>)
- C{py-output} -- the output from a doctest block.
- C{py-defname} -- the name of a function or class defined by
a C{def} or C{class} statement.
"""
return HTMLDoctestColorizer().colorize_doctest(s)
def doctest_to_latex(s):
"""
Perform syntax highlighting on the given doctest string, and
return the resulting LaTeX code. This code consists of an
C{alltt} environment. Syntax highlighting is performed using
the following new latex commands, which must be defined externally:
- C{\pysrcprompt} -- the Python PS1 prompt (>>>)
- C{\pysrcmore} -- the Python PS2 prompt (...)
- C{\pysrckeyword} -- a Python keyword (for, if, etc.)
- C{\pysrcbuiltin} -- a Python builtin name (abs, dir, etc.)
- C{\pysrcstring} -- a string literal
- C{\pysrccomment} -- a comment
- C{\pysrcexcept} -- an exception traceback (up to the next >>>)
- C{\pysrcoutput} -- the output from a doctest block.
- C{\pysrcdefname} -- the name of a function or class defined by
a C{def} or C{class} statement.
"""
return LaTeXDoctestColorizer().colorize_doctest(s)
class DoctestColorizer:
"""
An abstract base class for performing syntax highlighting on
doctest blocks and other bits of Python code. Subclasses should
provide definitions for:
- The L{markup()} method, which takes a substring and a tag, and
returns a colorized version of the substring.
- The L{PREFIX} and L{SUFFIX} variables, which will be added
to the beginning and end of the strings returned by
L{colorize_codeblock} and L{colorize_doctest}.
"""
#: A string that is added to the beginning of the strings
#: returned by L{colorize_codeblock} and L{colorize_doctest}.
#: Typically, this string begins a preformatted area.
PREFIX = None
#: A string that is added to the end of the strings
#: returned by L{colorize_codeblock} and L{colorize_doctest}.
#: Typically, this string ends a preformatted area.
SUFFIX = None
#: A list of the names of all Python keywords. ('as' is included
#: even though it is technically not a keyword.)
_KEYWORDS = ("and del for is raise "
"assert elif from lambda return "
"break else global not try "
"class except if or while "
"continue exec import pass yield "
"def finally in print as").split()
#: A list of all Python builtins.
_BUILTINS = [_BI for _BI in dir(__builtins__)
if not _BI.startswith('__')]
#: A regexp group that matches keywords.
_KEYWORD_GRP = '|'.join([r'\b%s\b' % _KW for _KW in _KEYWORDS])
#: A regexp group that matches Python builtins.
_BUILTIN_GRP = (r'(?<!\.)(?:%s)' % '|'.join([r'\b%s\b' % _BI
for _BI in _BUILTINS]))
#: A regexp group that matches Python strings.
_STRING_GRP = '|'.join(
[r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))',
r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"])
#: A regexp group that matches Python comments.
_COMMENT_GRP = '(#.*?$)'
#: A regexp group that matches Python ">>>" prompts.
_PROMPT1_GRP = r'^[ \t]*>>>(?:[ \t]|$)'
#: A regexp group that matches Python "..." prompts.
_PROMPT2_GRP = r'^[ \t]*\.\.\.(?:[ \t]|$)'
#: A regexp group that matches function and class definitions.
_DEFINE_GRP = r'\b(?:def|class)[ \t]+\w+'
#: A regexp that matches Python prompts
PROMPT_RE = re.compile('(%s|%s)' % (_PROMPT1_GRP, _PROMPT2_GRP),
re.MULTILINE | re.DOTALL)
#: A regexp that matches Python "..." prompts.
PROMPT2_RE = re.compile('(%s)' % _PROMPT2_GRP,
re.MULTILINE | re.DOTALL)
#: A regexp that matches doctest exception blocks.
EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*',
re.DOTALL | re.MULTILINE)
#: A regexp that matches doctest directives.
DOCTEST_DIRECTIVE_RE = re.compile(r'#[ \t]*doctest:.*')
#: A regexp that matches all of the regions of a doctest block
#: that should be colored.
DOCTEST_RE = re.compile(
r'(.*?)((?P<STRING>%s)|(?P<COMMENT>%s)|(?P<DEFINE>%s)|'
r'(?P<KEYWORD>%s)|(?P<BUILTIN>%s)|'
r'(?P<PROMPT1>%s)|(?P<PROMPT2>%s)|(?P<EOS>\Z))' % (
_STRING_GRP, _COMMENT_GRP, _DEFINE_GRP, _KEYWORD_GRP, _BUILTIN_GRP,
_PROMPT1_GRP, _PROMPT2_GRP), re.MULTILINE | re.DOTALL)
#: This regular expression is used to find doctest examples in a
#: string. This is copied from the standard Python doctest.py
#: module (after the refactoring in Python 2.4+).
DOCTEST_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)* # PS2 lines
\n?)
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
def colorize_inline(self, s):
"""
Colorize a string containing Python code. Do not add the
L{PREFIX} and L{SUFFIX} strings to the returned value. This
method is intended for generating syntax-highlighted strings
that are appropriate for inclusion as inline expressions.
"""
return self.DOCTEST_RE.sub(self.subfunc, s)
def colorize_codeblock(self, s):
"""
Colorize a string containing only Python code. This method
differs from L{colorize_doctest} in that it will not search
for doctest prompts when deciding how to colorize the string.
"""
body = self.DOCTEST_RE.sub(self.subfunc, s)
return self.PREFIX + body + self.SUFFIX
def colorize_doctest(self, s, strip_directives=False):
"""
Colorize a string containing one or more doctest examples.
"""
output = []
charno = 0
for m in self.DOCTEST_EXAMPLE_RE.finditer(s):
# Parse the doctest example:
pysrc, want = m.group('source', 'want')
# Pre-example text:
output.append(s[charno:m.start()])
# Example source code:
output.append(self.DOCTEST_RE.sub(self.subfunc, pysrc))
# Example output:
if want:
                if self.EXCEPT_RE.match(want):
                    output.append('\n'.join([self.markup(line, 'except')
                                             for line in want.split('\n')]))
                else:
                    output.append('\n'.join([self.markup(line, 'output')
                                             for line in want.split('\n')]))
# Update charno
charno = m.end()
# Add any remaining post-example text.
output.append(s[charno:])
return self.PREFIX + ''.join(output) + self.SUFFIX
def subfunc(self, match):
other, text = match.group(1, 2)
#print 'M %20r %20r' % (other, text) # <- for debugging
if other:
other = '\n'.join([self.markup(line, 'other')
for line in other.split('\n')])
if match.group('PROMPT1'):
return other + self.markup(text, 'prompt')
elif match.group('PROMPT2'):
return other + self.markup(text, 'more')
elif match.group('KEYWORD'):
return other + self.markup(text, 'keyword')
elif match.group('BUILTIN'):
return other + self.markup(text, 'builtin')
elif match.group('COMMENT'):
return other + self.markup(text, 'comment')
elif match.group('STRING') and '\n' not in text:
return other + self.markup(text, 'string')
elif match.group('STRING'):
# It's a multiline string; colorize the string & prompt
# portion of each line.
pieces = []
for line in text.split('\n'):
if self.PROMPT2_RE.match(line):
if len(line) > 4:
pieces.append(self.markup(line[:4], 'more') +
self.markup(line[4:], 'string'))
else:
pieces.append(self.markup(line[:4], 'more'))
elif line:
pieces.append(self.markup(line, 'string'))
else:
pieces.append('')
return other + '\n'.join(pieces)
elif match.group('DEFINE'):
            m = re.match(r'(?P<def>\w+)(?P<space>\s+)(?P<name>\w+)', text)
return other + (self.markup(m.group('def'), 'keyword') +
self.markup(m.group('space'), 'other') +
self.markup(m.group('name'), 'defname'))
elif match.group('EOS') is not None:
return other
else:
assert 0, 'Unexpected match!'
def markup(self, s, tag):
"""
Apply syntax highlighting to a single substring from a doctest
block. C{s} is the substring, and C{tag} is the tag that
should be applied to the substring. C{tag} will be one of the
following strings:
- C{prompt} -- the Python PS1 prompt (>>>)
- C{more} -- the Python PS2 prompt (...)
- C{keyword} -- a Python keyword (for, if, etc.)
- C{builtin} -- a Python builtin name (abs, dir, etc.)
- C{string} -- a string literal
- C{comment} -- a comment
- C{except} -- an exception traceback (up to the next >>>)
- C{output} -- the output from a doctest block.
- C{defname} -- the name of a function or class defined by
a C{def} or C{class} statement.
- C{other} -- anything else (does *not* include output.)
"""
raise AssertionError("Abstract method")
class XMLDoctestColorizer(DoctestColorizer):
"""
A subclass of DoctestColorizer that generates XML-like output.
This class is mainly intended to be used for testing purposes.
"""
PREFIX = '<colorized>\n'
SUFFIX = '</colorized>\n'
def markup(self, s, tag):
        s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
if tag == 'other': return s
else: return '<%s>%s</%s>' % (tag, s, tag)
class HTMLDoctestColorizer(DoctestColorizer):
"""A subclass of DoctestColorizer that generates HTML output."""
PREFIX = '<pre class="py-doctest">\n'
SUFFIX = '</pre>\n'
def markup(self, s, tag):
if tag == 'other':
return plaintext_to_html(s)
else:
return ('<span class="py-%s">%s</span>' %
(tag, plaintext_to_html(s)))
class LaTeXDoctestColorizer(DoctestColorizer):
"""A subclass of DoctestColorizer that generates LaTeX output."""
PREFIX = '\\begin{alltt}\n'
SUFFIX = '\\end{alltt}\n'
def markup(self, s, tag):
if tag == 'other':
return plaintext_to_latex(s)
else:
return '\\pysrc%s{%s}' % (tag, plaintext_to_latex(s))
| apache-2.0 |
takaaptech/sky_engine | sky/tools/webkitpy/layout_tests/breakpad/dump_reader_win_unittest.py | 59 | 3547 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.layout_tests.breakpad.dump_reader_win import DumpReaderWin
class TestDumpReaderWin(unittest.TestCase):
def test_check_is_functional_cdb_not_found(self):
host = MockHost()
host.executive = MockExecutive(should_throw=True)
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
dump_reader = DumpReaderWin(host, build_dir)
self.assertFalse(dump_reader.check_is_functional())
def test_get_pid_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.txt'
expected_pid = '4711'
host.filesystem.write_text_file(dump_file, 'channel:\npid:%s\nplat:Win32\nprod:content_shell\n' % expected_pid)
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
dump_reader = DumpReaderWin(host, build_dir)
self.assertTrue(dump_reader.check_is_functional())
self.assertEqual(expected_pid, dump_reader._get_pid_from_dump(dump_file))
def test_get_stack_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.dmp'
real_dump_file = '/crash-dumps/dump.dmp'
host.filesystem.write_text_file(dump_file, 'product:content_shell\n')
host.filesystem.write_binary_file(real_dump_file, 'MDMP')
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
dump_reader = DumpReaderWin(host, build_dir)
self.assertTrue(dump_reader.check_is_functional())
host.executive.calls = []
self.assertEqual("MOCK output of child process", dump_reader._get_stack_from_dump(dump_file))
self.assertEqual(1, len(host.executive.calls))
cmd_line = " ".join(host.executive.calls[0])
self.assertIn('cdb.exe', cmd_line)
self.assertIn(real_dump_file, cmd_line)
| bsd-3-clause |
RussianCraft/rcse | node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
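# Illustrative example (not from the original script): with
#   deps = {'app': ['base'], 'base': []}
# a call such as BuildProject('app', [], projects, deps) prints 'base' before
# 'app', so every project is listed after the projects it depends on.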
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
r'}"\) = "(.*)", "(.*)", "(.*)"$')
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile(
r'ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # Check that a solution file was passed as the first argument.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
eastlhu/zulip | zerver/migrations/0002_django_1_8.py | 125 | 2229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='userprofile',
managers=[
(b'objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AlterField(
model_name='appledevicetoken',
name='last_updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='mituser',
name='email',
field=models.EmailField(unique=True, max_length=254),
),
migrations.AlterField(
model_name='preregistrationuser',
name='email',
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name='preregistrationuser',
name='streams',
field=models.ManyToManyField(to='zerver.Stream'),
),
migrations.AlterField(
model_name='pushdevicetoken',
name='last_updated',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='referral',
name='email',
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name='userprofile',
name='email',
field=models.EmailField(unique=True, max_length=254, db_index=True),
),
migrations.AlterField(
model_name='userprofile',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
),
migrations.AlterField(
model_name='userprofile',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
]
| apache-2.0 |
helinb/Tickeys-linux | tickeys/kivy/input/postproc/retaintouch.py | 80 | 3224 | '''
Retain Touch
============
Reuse touch to counter lost finger behavior
'''
__all__ = ('InputPostprocRetainTouch', )
from kivy.config import Config
from kivy.vector import Vector
import time
class InputPostprocRetainTouch(object):
'''
InputPostprocRetainTouch is a post-processor to delay the 'up' event of a
    touch, to reuse it under certain conditions. This module is designed to
prevent lost finger touches on some hardware/setups.
Retain touch can be configured in the Kivy config file::
[postproc]
retain_time = 100
retain_distance = 50
The distance parameter is in the range 0-1000 and time is in milliseconds.
'''
def __init__(self):
self.timeout = Config.getint('postproc', 'retain_time') / 1000.0
self.distance = Config.getint('postproc', 'retain_distance') / 1000.0
self._available = []
self._links = {}
def process(self, events):
# check if module is disabled
if self.timeout == 0:
return events
d = time.time()
for etype, touch in events[:]:
if not touch.is_touch:
continue
if etype == 'end':
events.remove((etype, touch))
if touch.uid in self._links:
selection = self._links[touch.uid]
selection.ud.__pp_retain_time__ = d
self._available.append(selection)
del self._links[touch.uid]
else:
touch.ud.__pp_retain_time__ = d
self._available.append(touch)
elif etype == 'update':
if touch.uid in self._links:
selection = self._links[touch.uid]
selection.x = touch.x
selection.y = touch.y
selection.sx = touch.sx
selection.sy = touch.sy
events.remove((etype, touch))
events.append((etype, selection))
else:
pass
elif etype == 'begin':
# new touch, found the nearest one
selection = None
selection_distance = 99999
for touch2 in self._available:
touch_distance = Vector(touch2.spos).distance(touch.spos)
if touch_distance > self.distance:
continue
if touch2.__class__ != touch.__class__:
continue
if touch_distance < selection_distance:
# eligible for continuation
selection_distance = touch_distance
selection = touch2
if selection is None:
continue
self._links[touch.uid] = selection
self._available.remove(selection)
events.remove((etype, touch))
for touch in self._available[:]:
t = touch.ud.__pp_retain_time__
if d - t > self.timeout:
self._available.remove(touch)
events.append(('end', touch))
return events
| mit |
brian-yang/mozillians | mozillians/mozspaces/models.py | 13 | 2028 | import os
import uuid
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from product_details import product_details
from pytz import common_timezones
from sorl.thumbnail import ImageField
COUNTRIES = product_details.get_regions('en-US').items()
COUNTRIES = sorted(COUNTRIES, key=lambda country: country[1])
def _calculate_photo_filename(instance, filename):
"""Generate a unique filename for uploaded photo."""
return os.path.join(settings.MOZSPACE_PHOTO_DIR,
str(uuid.uuid4()) + '.jpg')
class MozSpace(models.Model):
name = models.CharField(max_length=100)
address = models.CharField(max_length=300)
region = models.CharField(max_length=100, blank=True, default='')
city = models.CharField(max_length=100)
country = models.CharField(max_length=5, choices=COUNTRIES)
timezone = models.CharField(
max_length=100, choices=zip(common_timezones, common_timezones))
lon = models.FloatField()
lat = models.FloatField()
phone = models.CharField(max_length=100, blank=True, default='')
email = models.EmailField(blank=True, default='')
coordinator = models.ForeignKey(User)
extra_text = models.TextField(blank=True, default='')
cover_photo = models.ForeignKey('Photo', null=True, blank=True,
related_name='featured_mozspace')
def __unicode__(self):
return self.name
class Keyword(models.Model):
keyword = models.CharField(max_length=50, unique=True)
mozspace = models.ForeignKey(MozSpace, related_name='keywords')
def save(self, *args, **kwargs):
self.keyword = self.keyword.lower()
super(Keyword, self).save(*args, **kwargs)
def __unicode__(self):
return self.keyword
class Photo(models.Model):
photofile = ImageField(upload_to=_calculate_photo_filename)
mozspace = models.ForeignKey(MozSpace, related_name='photos')
def __unicode__(self):
return unicode(self.id)
| bsd-3-clause |
srznew/heat | heat/tests/test_support.py | 13 | 3769 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.engine import support
from heat.tests import common
class SupportStatusTest(common.HeatTestCase):
def test_valid_status(self):
for sstatus in support.SUPPORT_STATUSES:
previous = support.SupportStatus(version='test_version')
status = support.SupportStatus(
status=sstatus,
message='test_message',
version='test_version',
previous_status=previous,
)
self.assertEqual(sstatus, status.status)
self.assertEqual('test_message', status.message)
self.assertEqual('test_version', status.version)
self.assertEqual(previous, status.previous_status)
self.assertEqual({
'status': sstatus,
'message': 'test_message',
'version': 'test_version',
'previous_status': {'status': 'SUPPORTED',
'message': None,
'version': 'test_version',
'previous_status': None},
}, status.to_dict())
def test_invalid_status(self):
status = support.SupportStatus(
status='RANDOM',
message='test_message',
version='test_version',
previous_status=support.SupportStatus()
)
self.assertEqual(support.UNKNOWN, status.status)
self.assertEqual('Specified status is invalid, defaulting to UNKNOWN',
status.message)
self.assertIsNone(status.version)
self.assertIsNone(status.previous_status)
self.assertEqual({
'status': 'UNKNOWN',
'message': 'Specified status is invalid, defaulting to UNKNOWN',
'version': None,
'previous_status': None,
}, status.to_dict())
def test_previous_status(self):
sstatus = support.SupportStatus(
status=support.DEPRECATED,
version='5.0.0',
previous_status=support.SupportStatus(
status=support.SUPPORTED,
version='2015.1'
)
)
self.assertEqual(support.DEPRECATED, sstatus.status)
self.assertEqual('5.0.0', sstatus.version)
self.assertEqual(support.SUPPORTED, sstatus.previous_status.status)
self.assertEqual('2015.1', sstatus.previous_status.version)
self.assertEqual({'status': 'DEPRECATED',
'version': '5.0.0',
'message': None,
'previous_status': {'status': 'SUPPORTED',
'version': '2015.1',
'message': None,
'previous_status': None}},
sstatus.to_dict())
def test_invalid_previous_status(self):
ex = self.assertRaises(ValueError,
support.SupportStatus, previous_status='YARRR')
self.assertEqual('previous_status must be SupportStatus '
'instead of %s' % str, six.text_type(ex))
| apache-2.0 |
sbg/sevenbridges-python | sevenbridges/meta/comp_mutable_dict.py | 1 | 1715 | # noinspection PyProtectedMember,PyUnresolvedReferences
class CompoundMutableDict(dict):
"""
    Mutable dict-like attribute of a resource: every modification is written
    back into the parent resource's data and recorded in its dirty-state
    tracking so it can be submitted when the resource is saved.
"""
# noinspection PyMissingConstructor
def __init__(self, **kwargs):
self._parent = kwargs.pop('_parent')
self._api = kwargs.pop('api')
for k, v in kwargs.items():
super().__setitem__(k, v)
def __setitem__(self, key, value):
super().__setitem__(key, value)
if self._name not in self._parent._dirty:
self._parent._dirty.update({self._name: {}})
if key in self._parent._data[self._name]:
if self._parent._data[self._name][key] != value:
self._parent._dirty[self._name][key] = value
self._parent._data[self._name][key] = value
else:
self._parent._data[self._name][key] = value
self._parent._dirty[self._name][key] = value
def __repr__(self):
values = {}
for k, _ in self.items():
values[k] = self[k]
return str(values)
__str__ = __repr__
def update(self, e=None, **f):
other = {}
if e:
other.update(e, **f)
else:
other.update(**f)
for k, v in other.items():
if other[k] != self[k]:
self[k] = other[k]
def items(self):
values = []
for k in self.keys():
values.append((k, self[k]))
return values
def equals(self, other):
if not type(other) == type(self):
return False
return (
self is other or
self._parent._data[self._name] == other._parent._data[self._name]
)
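# Illustrative sketch (not part of the original module): how writes are
# mirrored into the owning resource's bookkeeping.  ``FakeParent`` and the
# 'settings' name are invented for this example; real subclasses define
# ``_name`` and are constructed by their parent resource.
#
#   class FakeParent:
#       def __init__(self):
#           self._data = {'settings': {'cpus': 1}}
#           self._dirty = {}
#
#   parent = FakeParent()
#   d = CompoundMutableDict(_parent=parent, api=None, cpus=1)
#   d._name = 'settings'
#   d['cpus'] = 4
#   assert parent._dirty == {'settings': {'cpus': 4}}
#   assert parent._data['settings']['cpus'] == 4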
| apache-2.0 |
audax/kll | kll.py | 1 | 26244 | #!/usr/bin/env python3
# KLL Compiler
# Keyboard Layout Langauge
#
# Copyright (C) 2014-2015 by Jacob Alexander
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
### Imports ###
import argparse
import importlib
import io
import os
import re
import sys
import token
from pprint import pformat
from re import VERBOSE
from tokenize import generate_tokens
from kll_lib.containers import *
from kll_lib.hid_dict import *
from funcparserlib.lexer import make_tokenizer, Token, LexerError
from funcparserlib.parser import (some, a, many, oneplus, skip, finished, maybe, skip, forward_decl, NoParseError)
### Decorators ###
## Print Decorator Variables
ERROR = '\033[5;1;31mERROR\033[0m:'
## Python Text Formatting Fixer...
## Because the creators of Python are averse to proper capitalization.
textFormatter_lookup = {
"usage: " : "Usage: ",
"optional arguments" : "Optional Arguments",
}
def textFormatter_gettext( s ):
return textFormatter_lookup.get( s, s )
argparse._ = textFormatter_gettext
### Argument Parsing ###
def checkFileExists( filename ):
if not os.path.isfile( filename ):
print ( "{0} {1} does not exist...".format( ERROR, filename ) )
sys.exit( 1 )
def processCommandLineArgs():
# Setup argument processor
pArgs = argparse.ArgumentParser(
usage="%(prog)s [options] <file1>...",
description="Generates .h file state tables and pointer indices from KLL .kll files.",
epilog="Example: {0} mykeyboard.kll -d colemak.kll -p hhkbpro2.kll -p symbols.kll".format( os.path.basename( sys.argv[0] ) ),
formatter_class=argparse.RawTextHelpFormatter,
add_help=False,
)
# Positional Arguments
pArgs.add_argument( 'files', type=str, nargs='+',
help=argparse.SUPPRESS ) # Suppressed help output, because Python output is verbosely ugly
# Optional Arguments
pArgs.add_argument( '-b', '--backend', type=str, default="kiibohd",
help="Specify target backend for the KLL compiler.\n"
"Default: kiibohd\n"
"Options: kiibohd, json" )
pArgs.add_argument( '-d', '--default', type=str, nargs='+',
help="Specify .kll files to layer on top of the default map to create a combined map." )
pArgs.add_argument( '-p', '--partial', type=str, nargs='+', action='append',
help="Specify .kll files to generate partial map, multiple files per flag.\n"
"Each -p defines another partial map.\n"
"Base .kll files (that define the scan code maps) must be defined for each partial map." )
pArgs.add_argument( '-t', '--templates', type=str, nargs='+',
help="Specify template used to generate the keymap.\n"
"Default: <backend specific>" )
pArgs.add_argument( '-o', '--outputs', type=str, nargs='+',
help="Specify output file. Writes to current working directory by default.\n"
"Default: <backend specific>" )
pArgs.add_argument( '-h', '--help', action="help",
help="This message." )
# Process Arguments
args = pArgs.parse_args()
# Parameters
baseFiles = args.files
defaultFiles = args.default
partialFileSets = args.partial
if defaultFiles is None:
defaultFiles = []
if partialFileSets is None:
partialFileSets = [[]]
# Check file existance
for filename in baseFiles:
checkFileExists( filename )
for filename in defaultFiles:
checkFileExists( filename )
for partial in partialFileSets:
for filename in partial:
checkFileExists( filename )
return (baseFiles, defaultFiles, partialFileSets, args.backend, args.templates, args.outputs)
### Tokenizer ###
def tokenize( string ):
"""str -> Sequence(Token)"""
# Basic Tokens Spec
specs = [
( 'Comment', ( r' *#.*', ) ),
( 'Space', ( r'[ \t\r\n]+', ) ),
( 'USBCode', ( r'U(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'USBCodeStart', ( r'U\[', ) ),
( 'ConsCode', ( r'CONS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'ConsCodeStart', ( r'CONS\[', ) ),
( 'SysCode', ( r'SYS(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'SysCodeStart', ( r'SYS\[', ) ),
( 'LedCode', ( r'LED(("[^"]+")|(0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'LedCodeStart', ( r'LED\[', ) ),
( 'ScanCode', ( r'S((0x[0-9a-fA-F]+)|([0-9]+))', ) ),
( 'ScanCodeStart', ( r'S\[', ) ),
( 'CodeEnd', ( r'\]', ) ),
( 'String', ( r'"[^"]*"', VERBOSE ) ),
( 'SequenceString', ( r"'[^']*'", ) ),
( 'Operator', ( r'=>|:\+|:-|:|=', ) ),
( 'Comma', ( r',', ) ),
( 'Dash', ( r'-', ) ),
( 'Plus', ( r'\+', ) ),
( 'Parenthesis', ( r'\(|\)', ) ),
( 'None', ( r'None', ) ),
( 'Number', ( r'-?(0x[0-9a-fA-F]+)|(0|([1-9][0-9]*))', VERBOSE ) ),
( 'Name', ( r'[A-Za-z_][A-Za-z_0-9]*', ) ),
( 'VariableContents', ( r'''[^"' ;:=>()]+''', ) ),
( 'EndOfLine', ( r';', ) ),
]
# Tokens to filter out of the token stream
useless = ['Space', 'Comment']
tokens = make_tokenizer( specs )
return [x for x in tokens( string ) if x.type not in useless]
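# Illustrative only (not part of the original source): a rough sketch of the
# token stream produced for a simple mapping expression, given the specs above.
# Only the token types and matched text are shown; the actual objects are
# funcparserlib Token instances.
#
#   tokenize('S0x40 : U"A";')
#   # -> [ScanCode 'S0x40', Operator ':', USBCode 'U"A"', EndOfLine ';']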
### Parsing ###
## Map Arrays
macros_map = Macros()
variables_dict = Variables()
capabilities_dict = Capabilities()
## Parsing Functions
def make_scanCode( token ):
scanCode = int( token[1:], 0 )
# Check size, to make sure it's valid
if scanCode > 0xFF:
print ( "{0} ScanCode value {1} is larger than 255".format( ERROR, scanCode ) )
raise
return scanCode
def make_hidCode( type, token ):
# If first character is a U, strip
if token[0] == "U":
token = token[1:]
# CONS specifier
elif 'CONS' in token:
token = token[4:]
# SYS specifier
elif 'SYS' in token:
token = token[3:]
# If using string representation of USB Code, do lookup, case-insensitive
if '"' in token:
try:
hidCode = kll_hid_lookup_dictionary[ type ][ token[1:-1].upper() ][1]
except LookupError as err:
print ( "{0} {1} is an invalid USB HID Code Lookup...".format( ERROR, err ) )
raise
else:
# Already tokenized
if type == 'USBCode' and token[0] == 'USB' or type == 'SysCode' and token[0] == 'SYS' or type == 'ConsCode' and token[0] == 'CONS':
hidCode = token[1]
# Convert
else:
hidCode = int( token, 0 )
# Check size if a USB Code, to make sure it's valid
if type == 'USBCode' and hidCode > 0xFF:
print ( "{0} USBCode value {1} is larger than 255".format( ERROR, hidCode ) )
raise
# Return a tuple, identifying which type it is
if type == 'USBCode':
return make_usbCode_number( hidCode )
elif type == 'ConsCode':
return make_consCode_number( hidCode )
elif type == 'SysCode':
return make_sysCode_number( hidCode )
print ( "{0} Unknown HID Specifier '{1}'".format( ERROR, type ) )
raise
def make_usbCode( token ):
return make_hidCode( 'USBCode', token )
def make_consCode( token ):
return make_hidCode( 'ConsCode', token )
def make_sysCode( token ):
return make_hidCode( 'SysCode', token )
def make_hidCode_number( type, token ):
lookup = {
'ConsCode' : 'CONS',
'SysCode' : 'SYS',
'USBCode' : 'USB',
}
return ( lookup[ type ], token )
def make_usbCode_number( token ):
return make_hidCode_number( 'USBCode', token )
def make_consCode_number( token ):
return make_hidCode_number( 'ConsCode', token )
def make_sysCode_number( token ):
return make_hidCode_number( 'SysCode', token )
# Replace key-word with None specifier (which indicates a noneOut capability)
def make_none( token ):
return [[[('NONE', 0)]]]
def make_seqString( token ):
# Shifted Characters, and amount to move by to get non-shifted version
# US ANSI
shiftCharacters = (
( "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 0x20 ),
( "+", 0x12 ),
( "&(", 0x11 ),
( "!#$%<>", 0x10 ),
( "*", 0x0E ),
( ")", 0x07 ),
( '"', 0x05 ),
( ":", 0x01 ),
( "^", -0x10 ),
( "_", -0x18 ),
( "{}|~", -0x1E ),
( "@", -0x32 ),
( "?", -0x38 ),
)
listOfLists = []
shiftKey = kll_hid_lookup_dictionary['USBCode']["SHIFT"]
# Creates a list of USB codes from the string: sequence (list) of combos (lists)
for char in token[1:-1]:
processedChar = char
# Whether or not to create a combo for this sequence with a shift
shiftCombo = False
# Depending on the ASCII character, convert to single character or Shift + character
for pair in shiftCharacters:
if char in pair[0]:
shiftCombo = True
processedChar = chr( ord( char ) + pair[1] )
break
# Do KLL HID Lookup on non-shifted character
# NOTE: Case-insensitive, which is why the shift must be pre-computed
usbCode = kll_hid_lookup_dictionary['USBCode'][ processedChar.upper() ]
# Create Combo for this character, add shift key if shifted
charCombo = []
if shiftCombo:
charCombo = [ [ shiftKey ] ]
charCombo.append( [ usbCode ] )
# Add to list of lists
listOfLists.append( charCombo )
return listOfLists
def make_string( token ):
return token[1:-1]
def make_unseqString( token ):
return token[1:-1]
def make_number( token ):
return int( token, 0 )
# Range can go from high to low or low to high
def make_scanCode_range( rangeVals ):
start = rangeVals[0]
end = rangeVals[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
return list( range( start, end + 1 ) )
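# For example (illustrative, not in the original source):
#   make_scanCode_range( [5, 3] )  # -> [3, 4, 5]
#   make_scanCode_range( [3, 5] )  # -> [3, 4, 5]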
# Range can go from high to low or low to high
# Warn on 0-9 for USBCodes (as this does not do what one would expect) TODO
# Lookup USB HID tags and convert to a number
def make_hidCode_range( type, rangeVals ):
# Check if already integers
if isinstance( rangeVals[0], int ):
start = rangeVals[0]
else:
start = make_hidCode( type, rangeVals[0] )[1]
if isinstance( rangeVals[1], int ):
end = rangeVals[1]
else:
end = make_hidCode( type, rangeVals[1] )[1]
# Swap start, end if start is greater than end
if start > end:
start, end = end, start
# Iterate from start to end, and generate the range
listRange = list( range( start, end + 1 ) )
# Convert each item in the list to a tuple
for item in range( len( listRange ) ):
listRange[ item ] = make_hidCode_number( type, listRange[ item ] )
return listRange
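# For example (illustrative, not in the original source):
#   make_hidCode_range( 'USBCode', [4, 6] )
#   # -> [('USB', 4), ('USB', 5), ('USB', 6)]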
def make_usbCode_range( rangeVals ):
return make_hidCode_range( 'USBCode', rangeVals )
def make_sysCode_range( rangeVals ):
return make_hidCode_range( 'SysCode', rangeVals )
def make_consCode_range( rangeVals ):
return make_hidCode_range( 'ConsCode', rangeVals )
## Base Rules
const = lambda x: lambda _: x
unarg = lambda f: lambda x: f(*x)
flatten = lambda list: sum( list, [] )
tokenValue = lambda x: x.value
tokenType = lambda t: some( lambda x: x.type == t ) >> tokenValue
operator = lambda s: a( Token( 'Operator', s ) ) >> tokenValue
parenthesis = lambda s: a( Token( 'Parenthesis', s ) ) >> tokenValue
eol = a( Token( 'EndOfLine', ';' ) )
def listElem( item ):
return [ item ]
def listToTuple( items ):
return tuple( items )
# Flatten only the top layer (list of lists of ...)
def oneLayerFlatten( items ):
mainList = []
for sublist in items:
for item in sublist:
mainList.append( item )
return mainList
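# For example (illustrative): oneLayerFlatten( [[1, 2], [3]] ) -> [1, 2, 3];
# deeper nesting such as [[[1]], [2]] only loses the outermost level.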
# Capability arguments may need to be expanded
# (e.g. one 16-bit argument becomes two 8-bit arguments for the state machine)
def capArgExpander( items ):
newArgs = []
# For each defined argument in the capability definition
for arg in range( 0, len( capabilities_dict[ items[0] ][1] ) ):
argLen = capabilities_dict[ items[0] ][1][ arg ][1]
num = items[1][ arg ]
byteForm = num.to_bytes( argLen, byteorder='little' ) # XXX Yes, little endian from how the uC structs work
# For each sub-argument, split into byte-sized chunks
for byte in range( 0, argLen ):
newArgs.append( byteForm[ byte ] )
return tuple( [ items[0], tuple( newArgs ) ] )
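# Illustrative sketch (not from the original source).  Assuming a capability
# 'myCapability' was declared with a single 16 bit (2 byte) argument, i.e.
# capabilities_dict['myCapability'][1] == [('arg', 2)], then:
#   capArgExpander( ('myCapability', (0x1234,)) )
#   # -> ('myCapability', (0x34, 0x12))   little endian byte order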
# Expand ranges of values in the 3rd dimension of the list, to a list of 2nd lists
# i.e. [ sequence, [ combo, [ range ] ] ] --> [ [ sequence, [ combo ] ], <option 2>, <option 3> ]
def optionExpansion( sequences ):
expandedSequences = []
# Total number of combinations of the sequence of combos that needs to be generated
totalCombinations = 1
# List of leaf lists, with number of leaves
maxLeafList = []
# Traverse to the leaf nodes, and count the items in each leaf list
for sequence in sequences:
for combo in sequence:
rangeLen = len( combo )
totalCombinations *= rangeLen
maxLeafList.append( rangeLen )
# Counter list to keep track of which combination is being generated
curLeafList = [0] * len( maxLeafList )
    # Generate a list of permutations of the sequence of combos
for count in range( 0, totalCombinations ):
expandedSequences.append( [] ) # Prepare list for adding the new combination
position = 0
        # Traverse the sequence of combos to generate this permutation
for sequence in sequences:
expandedSequences[ -1 ].append( [] )
for combo in sequence:
expandedSequences[ -1 ][ -1 ].append( combo[ curLeafList[ position ] ] )
position += 1
# Increment combination tracker
for leaf in range( 0, len( curLeafList ) ):
curLeafList[ leaf ] += 1
# Reset this position, increment next position (if it exists), then stop
if curLeafList[ leaf ] >= maxLeafList[ leaf ]:
curLeafList[ leaf ] = 0
if leaf + 1 < len( curLeafList ):
curLeafList[ leaf + 1 ] += 1
return expandedSequences
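# Illustrative example (not part of the original source): a single combo whose
# scan codes were written as the range S[0x40-0x41] reaches this function as
# [[[0x40, 0x41]]] and is expanded into one variant per option:
#   optionExpansion( [[[0x40, 0x41]]] )
#   # -> [[[0x40]], [[0x41]]]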
# Converts USB Codes into Capabilities
# These are tuples (<type>, <integer>)
def hidCodeToCapability( items ):
# Items already converted to variants using optionExpansion
for variant in range( 0, len( items ) ):
# Sequence of Combos
for sequence in range( 0, len( items[ variant ] ) ):
for combo in range( 0, len( items[ variant ][ sequence ] ) ):
if items[ variant ][ sequence ][ combo ][0] in backend.requiredCapabilities.keys():
try:
# Use backend capability name and a single argument
items[ variant ][ sequence ][ combo ] = tuple(
[ backend.capabilityLookup( items[ variant ][ sequence ][ combo ][0] ),
tuple( [ hid_lookup_dictionary[ items[ variant ][ sequence ][ combo ] ] ] ) ]
)
except KeyError:
print ( "{0} {1} is an invalid HID lookup value".format( ERROR, items[ variant ][ sequence ][ combo ] ) )
sys.exit( 1 )
return items
# Convert tuple of tuples to list of lists
def listit( t ):
return list( map( listit, t ) ) if isinstance( t, ( list, tuple ) ) else t
# Convert list of lists to tuple of tuples
def tupleit( t ):
return tuple( map( tupleit, t ) ) if isinstance( t, ( tuple, list ) ) else t
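# For example (illustrative): listit( ((1, 2), (3,)) ) -> [[1, 2], [3]] and
# tupleit( [[1, 2], [3]] ) -> ((1, 2), (3,))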
## Evaluation Rules
def eval_scanCode( triggers, operator, results ):
    # Convert lists of lists of lists to tuples of tuples of tuples
    # Tuples are immutable and can be used as index items
triggers = tuple( tuple( tuple( sequence ) for sequence in variant ) for variant in triggers )
results = tuple( tuple( tuple( sequence ) for sequence in variant ) for variant in results )
# Lookup interconnect id (Current file scope)
# Default to 0 if not specified
if 'ConnectId' not in variables_dict.overallVariables.keys():
id_num = 0
else:
id_num = int( variables_dict.overallVariables['ConnectId'] )
# Iterate over all combinations of triggers and results
for sequence in triggers:
# Convert tuple of tuples to list of lists so each element can be modified
trigger = listit( sequence )
# Create ScanCode entries for trigger
for seq_index, combo in enumerate( sequence ):
for com_index, scancode in enumerate( combo ):
trigger[ seq_index ][ com_index ] = macros_map.scanCodeStore.append( ScanCode( scancode, id_num ) )
# Convert back to a tuple of tuples
trigger = tupleit( trigger )
for result in results:
# Append Case
if operator == ":+":
macros_map.appendScanCode( trigger, result )
# Remove Case
elif operator == ":-":
macros_map.removeScanCode( trigger, result )
# Replace Case
elif operator == ":":
macros_map.replaceScanCode( trigger, result )
def eval_usbCode( triggers, operator, results ):
    # Convert lists of lists of lists to tuples of tuples of tuples
    # Tuples are immutable and can be used as index items
triggers = tuple( tuple( tuple( sequence ) for sequence in variant ) for variant in triggers )
results = tuple( tuple( tuple( sequence ) for sequence in variant ) for variant in results )
# Iterate over all combinations of triggers and results
for trigger in triggers:
scanCodes = macros_map.lookupUSBCodes( trigger )
for scanCode in scanCodes:
for result in results:
# Cache assignment until file finishes processing
macros_map.cacheAssignment( operator, scanCode, result )
def eval_variable( name, content ):
# Content might be a concatenation of multiple data types, convert everything into a single string
assigned_content = ""
for item in content:
assigned_content += str( item )
variables_dict.assignVariable( name, assigned_content )
def eval_capability( name, function, args ):
capabilities_dict[ name ] = [ function, args ]
def eval_define( name, cdefine_name ):
variables_dict.defines[ name ] = cdefine_name
map_scanCode = unarg( eval_scanCode )
map_usbCode = unarg( eval_usbCode )
set_variable = unarg( eval_variable )
set_capability = unarg( eval_capability )
set_define = unarg( eval_define )
## Sub Rules
usbCode = tokenType('USBCode') >> make_usbCode
scanCode = tokenType('ScanCode') >> make_scanCode
consCode = tokenType('ConsCode') >> make_consCode
sysCode = tokenType('SysCode') >> make_sysCode
none = tokenType('None') >> make_none
name = tokenType('Name')
number = tokenType('Number') >> make_number
comma = tokenType('Comma')
dash = tokenType('Dash')
plus = tokenType('Plus')
content = tokenType('VariableContents')
string = tokenType('String') >> make_string
unString = tokenType('String') # When the double quotes are still needed for internal processing
seqString = tokenType('SequenceString') >> make_seqString
unseqString = tokenType('SequenceString') >> make_unseqString # For use with variables
# Code variants
code_end = tokenType('CodeEnd')
# Scan Codes
scanCode_start = tokenType('ScanCodeStart')
scanCode_range = number + skip( dash ) + number >> make_scanCode_range
scanCode_listElem = number >> listElem
scanCode_innerList = oneplus( ( scanCode_range | scanCode_listElem ) + skip( maybe( comma ) ) ) >> flatten
scanCode_expanded = skip( scanCode_start ) + scanCode_innerList + skip( code_end )
scanCode_elem = scanCode >> listElem
scanCode_combo = oneplus( ( scanCode_expanded | scanCode_elem ) + skip( maybe( plus ) ) )
scanCode_sequence = oneplus( scanCode_combo + skip( maybe( comma ) ) )
# USB Codes
usbCode_start = tokenType('USBCodeStart')
usbCode_number = number >> make_usbCode_number
usbCode_range = ( usbCode_number | unString ) + skip( dash ) + ( number | unString ) >> make_usbCode_range
usbCode_listElemTag = unString >> make_usbCode
usbCode_listElem = ( usbCode_number | usbCode_listElemTag ) >> listElem
usbCode_innerList = oneplus( ( usbCode_range | usbCode_listElem ) + skip( maybe( comma ) ) ) >> flatten
usbCode_expanded = skip( usbCode_start ) + usbCode_innerList + skip( code_end )
usbCode_elem = usbCode >> listElem
usbCode_combo = oneplus( ( usbCode_expanded | usbCode_elem ) + skip( maybe( plus ) ) ) >> listElem
usbCode_sequence = oneplus( ( usbCode_combo | seqString ) + skip( maybe( comma ) ) ) >> oneLayerFlatten
# Cons Codes
consCode_start = tokenType('ConsCodeStart')
consCode_number = number >> make_consCode_number
consCode_range = ( consCode_number | unString ) + skip( dash ) + ( number | unString ) >> make_consCode_range
consCode_listElemTag = unString >> make_consCode
consCode_listElem = ( consCode_number | consCode_listElemTag ) >> listElem
consCode_innerList = oneplus( ( consCode_range | consCode_listElem ) + skip( maybe( comma ) ) ) >> flatten
consCode_expanded = skip( consCode_start ) + consCode_innerList + skip( code_end )
consCode_elem = consCode >> listElem
# Sys Codes
sysCode_start = tokenType('SysCodeStart')
sysCode_number = number >> make_sysCode_number
sysCode_range = ( sysCode_number | unString ) + skip( dash ) + ( number | unString ) >> make_sysCode_range
sysCode_listElemTag = unString >> make_sysCode
sysCode_listElem = ( sysCode_number | sysCode_listElemTag ) >> listElem
sysCode_innerList = oneplus( ( sysCode_range | sysCode_listElem ) + skip( maybe( comma ) ) ) >> flatten
sysCode_expanded = skip( sysCode_start ) + sysCode_innerList + skip( code_end )
sysCode_elem = sysCode >> listElem
# HID Codes
hidCode_elem = usbCode_expanded | usbCode_elem | sysCode_expanded | sysCode_elem | consCode_expanded | consCode_elem
# Capabilities
capFunc_arguments = many( number + skip( maybe( comma ) ) ) >> listToTuple
capFunc_elem = name + skip( parenthesis('(') ) + capFunc_arguments + skip( parenthesis(')') ) >> capArgExpander >> listElem
capFunc_combo = oneplus( ( hidCode_elem | capFunc_elem ) + skip( maybe( plus ) ) ) >> listElem
capFunc_sequence = oneplus( ( capFunc_combo | seqString ) + skip( maybe( comma ) ) ) >> oneLayerFlatten
# Trigger / Result Codes
triggerCode_outerList = scanCode_sequence >> optionExpansion
triggerUSBCode_outerList = usbCode_sequence >> optionExpansion >> hidCodeToCapability
resultCode_outerList = ( ( capFunc_sequence >> optionExpansion ) | none ) >> hidCodeToCapability
## Main Rules
#| <variable> = <variable contents>;
variable_contents = name | content | string | number | comma | dash | unseqString
variable_expression = name + skip( operator('=') ) + oneplus( variable_contents ) + skip( eol ) >> set_variable
#| <capability name> => <c function>;
capability_arguments = name + skip( operator(':') ) + number + skip( maybe( comma ) )
capability_expression = name + skip( operator('=>') ) + name + skip( parenthesis('(') ) + many( capability_arguments ) + skip( parenthesis(')') ) + skip( eol ) >> set_capability
#| <define name> => <c define>;
define_expression = name + skip( operator('=>') ) + name + skip( eol ) >> set_define
#| <trigger> : <result>;
operatorTriggerResult = operator(':') | operator(':+') | operator(':-')
scanCode_expression = triggerCode_outerList + operatorTriggerResult + resultCode_outerList + skip( eol ) >> map_scanCode
usbCode_expression = triggerUSBCode_outerList + operatorTriggerResult + resultCode_outerList + skip( eol ) >> map_usbCode
def parse( tokenSequence ):
"""Sequence(Token) -> object"""
# Top-level Parser
expression = scanCode_expression | usbCode_expression | variable_expression | capability_expression | define_expression
kll_text = many( expression )
kll_file = maybe( kll_text ) + skip( finished )
return kll_file.parse( tokenSequence )
def processKLLFile( filename ):
with open( filename ) as file:
data = file.read()
tokenSequence = tokenize( data )
#print ( pformat( tokenSequence ) ) # Display tokenization
try:
tree = parse( tokenSequence )
except NoParseError as e:
print("Error parsing %s. %s" % (filename, e.msg), file=sys.stderr)
sys.exit(1)
### Misc Utility Functions ###
def gitRevision( kllPath ):
import subprocess
# Change the path to where kll.py is
origPath = os.getcwd()
os.chdir( kllPath )
# Just in case git can't be found
try:
# Get hash of the latest git commit
revision = subprocess.check_output( ['git', 'rev-parse', 'HEAD'] ).decode()[:-1]
# Get list of files that have changed since the commit
changed = subprocess.check_output( ['git', 'diff-index', '--name-only', 'HEAD', '--'] ).decode().splitlines()
except:
revision = "<no git>"
changed = []
# Change back to the old working directory
os.chdir( origPath )
return revision, changed
### Main Entry Point ###
if __name__ == '__main__':
(baseFiles, defaultFiles, partialFileSets, backend_name, templates, outputs) = processCommandLineArgs()
# Look up git information on the compiler
gitRev, gitChanges = gitRevision( os.path.dirname( os.path.realpath( __file__ ) ) )
# Load backend module
global backend
backend_import = importlib.import_module( "backends.{0}".format( backend_name ) )
backend = backend_import.Backend( templates )
# Process base layout files
for filename in baseFiles:
variables_dict.setCurrentFile( filename )
processKLLFile( filename )
macros_map.completeBaseLayout() # Indicates to macros_map that the base layout is complete
variables_dict.baseLayoutFinished()
# Default combined layer
for filename in defaultFiles:
variables_dict.setCurrentFile( filename )
processKLLFile( filename )
# Apply assignment cache, see 5.1.2 USB Codes for why this is necessary
macros_map.replayCachedAssignments()
# Iterate through additional layers
for partial in partialFileSets:
# Increment layer for each -p option
macros_map.addLayer()
variables_dict.incrementLayer() # DefaultLayer is layer 0
# Iterate and process each of the file in the layer
for filename in partial:
variables_dict.setCurrentFile( filename )
processKLLFile( filename )
# Apply assignment cache, see 5.1.2 USB Codes for why this is necessary
macros_map.replayCachedAssignments()
# Remove un-marked keys to complete the partial layer
macros_map.removeUnmarked()
# Do macro correlation and transformation
macros_map.generate()
# Process needed templating variables using backend
backend.process(
capabilities_dict,
macros_map,
variables_dict,
gitRev,
gitChanges
)
# Generate output file using template and backend
backend.generate( outputs )
# Successful Execution
sys.exit( 0 )
| gpl-3.0 |
santoshphilip/eppy | tests/test_simpleread.py | 1 | 4640 | """py.test for simpleread.py"""
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from io import StringIO
import eppy.simpleread as simpleread
def test_idf2txt():
"""py.test for idf2txt"""
data = (
(
"""
VERSION,
7.3; !- Version Identifier
SIMULATIONCONTROL,
Yes, !- Do Zone Sizing Calculation
Yes, !- Do System Sizing Calculation
Yes, !- Do Plant Sizing Calculation
No, !- Run Simulation for Sizing Periods
Yes; !- Run Simulation for Weather File Run Periods
BUILDING,
Empire State Building, !- Name
30.0, !- North Axis
City, !- Terrain
0.04, !- Loads Convergence Tolerance Value
0.4, !- Temperature Convergence Tolerance Value
FullExterior, !- Solar Distribution
25, !- Maximum Number of Warmup Days
6; !- Minimum Number of Warmup Days
SITE:LOCATION,
CHICAGO_IL_USA TMY2-94846, !- Name
41.78, !- Latitude
-87.75, !- Longitude
-6.0, !- Time Zone
190.0; !- Elevation
""",
""";
BUILDING,
Empire State Building,
30.0,
City,
0.04,
0.4,
FullExterior,
25.0,
6.0;
SIMULATIONCONTROL,
Yes,
Yes,
Yes,
No,
Yes;
SITE:LOCATION,
CHICAGO_IL_USA TMY2-94846,
41.78,
-87.75,
-6.0,
190.0;
VERSION,
7.3;
""",
), # intxt, outtxt
)
for intxt, outtxt in data:
result = simpleread.idf2txt(intxt)
assert result == outtxt
def test_idfreadtest():
"""py.test for idfreadtest"""
data = (
(
"""!IDD_Version 7.2.0.006
Version,
\\unique-object
\\format singleLine
A1 ; \\field Version Identifier
SimulationControl,
\\unique-object
A1, \\field Do Zone Sizing Calculation
A2, \\field Do System Sizing Calculation
A3, \\field Do Plant Sizing Calculation
A4, \\field Run Simulation for Sizing Periods
A5; \\field Run Simulation for Weather File Run Periods
Building,
\\unique-object
A1 , \\field Name
N1 , \\field North Axis
A2 , \\field Terrain
N2 , \\field Loads Convergence Tolerance Value
N3 , \\field Temperature Convergence Tolerance Value
A3 , \\field Solar Distribution
N4 , \\field Maximum Number of Warmup Days
N5 ; \\field Minimum Number of Warmup Days
Site:Location,
\\unique-object
A1 , \\field Name
N1 , \\field Latitude
N2 , \\field Longitude
N3 , \\field Time Zone
N4 ; \\field Elevation
""",
"""
VERSION,
7.3; !- Version Identifier
SIMULATIONCONTROL,
Yes, !- Do Zone Sizing Calculation
Yes, !- Do System Sizing Calculation
Yes, !- Do Plant Sizing Calculation
No, !- Run Simulation for Sizing Periods
Yes; !- Run Simulation for Weather File Run Periods
BUILDING,
Empire State Building, !- Name
30.0, !- North Axis
City, !- Terrain
0.04, !- Loads Convergence Tolerance Value
0.4, !- Temperature Convergence Tolerance Value
FullExterior, !- Solar Distribution
25, !- Maximum Number of Warmup Days
6; !- Minimum Number of Warmup Days
SITE:LOCATION,
CHICAGO_IL_USA TMY2-94846, !- Name
41.78, !- Latitude
-87.75, !- Longitude
-6.0, !- Time Zone
190.0; !- Elevation
""",
), # iddtxt, idftxt
)
for iddtxt, idftxt in data:
iddhandle = StringIO(iddtxt)
idfhandle1 = StringIO(idftxt)
idfhandle2 = StringIO(idftxt)
result = simpleread.idfreadtest(iddhandle, idfhandle1, idfhandle2)
assert result == True
| mit |
ChandraAddala/blueflood | contrib/grinder/scripts/annotationsingest.py | 1 | 2675 | import random
try:
from com.xhaus.jyson import JysonCodec as json
except ImportError:
import json
from utils import *
from net.grinder.script import Test
from net.grinder.plugin.http import HTTPRequest
class AnnotationsIngestThread(AbstractThread):
# The list of metric numbers for all threads in this worker
annotations = []
# Grinder test reporting infrastructure
test1 = Test(2, "Annotations Ingest test")
request = HTTPRequest()
test1.record(request)
@classmethod
def create_metrics(cls, agent_number):
""" Generate all the annotations for this worker
"""
cls.annotations = generate_metrics_tenants(
default_config['annotations_num_tenants'],
default_config['annotations_per_tenant'], agent_number,
default_config['num_nodes'],
cls.generate_annotations_for_tenant)
@classmethod
def num_threads(cls):
return default_config['annotations_concurrency']
@classmethod
def generate_annotations_for_tenant(cls, tenant_id,
annotations_per_tenant):
l = []
for x in range(annotations_per_tenant):
l.append([tenant_id, x])
return l
def __init__(self, thread_num):
AbstractThread.__init__(self, thread_num)
# Initialize the "slice" of the metrics to be sent by this thread
start, end = generate_job_range(len(self.annotations),
self.num_threads(), thread_num)
self.slice = self.annotations[start:end]
def generate_annotation(self, time, metric_id):
metric_name = generate_metric_name(metric_id)
return {'what': 'annotation ' + metric_name,
'when': time,
'tags': 'tag',
'data': 'data'}
def generate_payload(self, time, metric_id):
payload = self.generate_annotation(time, metric_id)
return json.dumps(payload)
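    # Illustrative only (not from the original script): assuming the helper
    # generate_metric_name(7) from utils returns something like 'metric_7',
    # generate_payload(1000, 7) would serialize roughly to
    #   {"what": "annotation metric_7", "when": 1000,
    #    "tags": "tag", "data": "data"}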
def ingest_url(self, tenant_id):
return "%s/v2.0/%s/events" % (default_config['url'], tenant_id)
def make_request(self, logger):
if len(self.slice) == 0:
logger("Warning: no work for current thread")
self.sleep(1000000)
return None
self.check_position(logger, len(self.slice))
batch = self.slice[self.position]
tenant_id = batch[0]
metric_id = batch[1]
payload = self.generate_payload(int(self.time()), metric_id)
self.position += 1
result = self.request.POST(self.ingest_url(tenant_id), payload)
return result
ThreadManager.add_type(AnnotationsIngestThread)
| apache-2.0 |
isoriss123/namebench | nb_third_party/dns/exception.py | 250 | 1319 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Common DNS Exceptions."""
class DNSException(Exception):
"""Abstract base class shared by all dnspython exceptions."""
pass
class FormError(DNSException):
"""DNS message is malformed."""
pass
class SyntaxError(DNSException):
"""Text input is malformed."""
pass
class UnexpectedEnd(SyntaxError):
"""Raised if text input ends unexpectedly."""
pass
class TooBig(DNSException):
"""The message is too big."""
pass
class Timeout(DNSException):
"""The operation timed out."""
pass
| apache-2.0 |
DaniilLeksin/gc | wx/tools/Editra/src/extern/pygments/lexers/__init__.py | 72 | 7269 | # -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, bytes
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + LEXERS.keys()
_lexer_cache = {}
def _load_lexers(module_name):
"""
Load a lexer (and all others in the module too).
"""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""
Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
"""
for item in LEXERS.itervalues():
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""
Lookup a lexer class by name. Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in LEXERS.itervalues():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""
Get a lexer by an alias.
"""
# lookup builtin lexers
for module_name, name, aliases, _, _ in LEXERS.itervalues():
if _alias in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
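# Illustrative usage (not part of the original module):
#
#   lexer = get_lexer_by_name('python', stripall=True)  # a PythonLexer instance
#   lexer.name                                           # -> 'Python'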
def get_lexer_for_filename(_fn, code=None, **options):
"""
Get a lexer for a filename. If multiple lexers match the filename
pattern, use ``analyze_text()`` to figure out which one is more
appropriate.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in LEXERS.itervalues():
for filename in filenames:
if fnmatch.fnmatch(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append(_lexer_cache[name])
for cls in find_plugin_lexers():
for filename in cls.filenames:
if fnmatch.fnmatch(fn, filename):
matches.append(cls)
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = code.decode('latin1')
def get_rating(cls):
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
d = cls.analyse_text(code)
#print "Got %r from %r" % (d, cls)
return d
if code:
matches.sort(key=get_rating)
if matches:
#print "Possible lexers, after sort:", matches
return matches[-1](**options)
raise ClassNotFound('no lexer for filename %r found' % _fn)
def get_lexer_for_mimetype(_mime, **options):
"""
Get a lexer for a mimetype.
"""
for modname, name, _, _, mimetypes in LEXERS.itervalues():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses():
"""
Return an iterator over all lexer classes.
"""
for module_name, name, _, _, _ in LEXERS.itervalues():
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
    Look up all lexers that handle those filenames primarily (``filenames``)
    or secondarily (``alias_filenames``). Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = None
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if fnmatch.fnmatch(fn, filename):
matching_lexers.add(lexer)
primary = lexer
for filename in lexer.alias_filenames:
if fnmatch.fnmatch(fn, filename):
matching_lexers.add(lexer)
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
result.sort()
if not result[-1][0] and primary is not None:
return primary(**options)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""
Guess a lexer by strong distinctions in the text (eg, shebang).
"""
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
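# Swap the real module object in sys.modules for an _automodule instance so that
# lexer classes listed in LEXERS are imported lazily on first attribute access.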
oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
sys.modules['pygments.lexers'] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
| apache-2.0 |
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/Bindings.py | 130 | 3295 | """Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
import sys
from idlelib.configHandler import idleConf
from idlelib import macosxSupport
menudefs = [
 # an underscore prefixes the character to underline (menu accelerator)
('file', [
('_New Window', '<<open-new-window>>'),
('_Open...', '<<open-window-from-file>>'),
('Open _Module...', '<<open-module>>'),
('Class _Browser', '<<open-class-browser>>'),
('_Path Browser', '<<open-path-browser>>'),
None,
('_Save', '<<save-window>>'),
('Save _As...', '<<save-window-as-file>>'),
('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
None,
('Prin_t Window', '<<print-window>>'),
None,
('_Close', '<<close-window>>'),
('E_xit', '<<close-all-windows>>'),
]),
('edit', [
('_Undo', '<<undo>>'),
('_Redo', '<<redo>>'),
None,
('Cu_t', '<<cut>>'),
('_Copy', '<<copy>>'),
('_Paste', '<<paste>>'),
('Select _All', '<<select-all>>'),
None,
('_Find...', '<<find>>'),
('Find A_gain', '<<find-again>>'),
('Find _Selection', '<<find-selection>>'),
('Find in Files...', '<<find-in-files>>'),
('R_eplace...', '<<replace>>'),
('Go to _Line', '<<goto-line>>'),
]),
('format', [
('_Indent Region', '<<indent-region>>'),
('_Dedent Region', '<<dedent-region>>'),
('Comment _Out Region', '<<comment-region>>'),
('U_ncomment Region', '<<uncomment-region>>'),
('Tabify Region', '<<tabify-region>>'),
('Untabify Region', '<<untabify-region>>'),
('Toggle Tabs', '<<toggle-tabs>>'),
('New Indent Width', '<<change-indentwidth>>'),
]),
('run', [
('Python Shell', '<<open-python-shell>>'),
]),
('shell', [
('_View Last Restart', '<<view-restart>>'),
('_Restart Shell', '<<restart-shell>>'),
]),
('debug', [
('_Go to File/Line', '<<goto-file-line>>'),
('!_Debugger', '<<toggle-debugger>>'),
('_Stack Viewer', '<<open-stack-viewer>>'),
('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
]),
('options', [
('_Configure IDLE...', '<<open-config-dialog>>'),
None,
]),
('help', [
('_About IDLE', '<<about-idle>>'),
None,
('_IDLE Help', '<<help>>'),
('Python _Docs', '<<python-docs>>'),
]),
]
if macosxSupport.runningAsOSXApp():
# Running as a proper MacOS application bundle. This block restructures
# the menus a little to make them conform better to the HIG.
quitItem = menudefs[0][1][-1]
closeItem = menudefs[0][1][-2]
# Remove the last 3 items of the file menu: a separator, close window and
# quit. Close window will be reinserted just above the save item, where
# it should be according to the HIG. Quit is in the application menu.
del menudefs[0][1][-3:]
menudefs[0][1].insert(6, closeItem)
# Remove the 'About' entry from the help menu, it is in the application
# menu
del menudefs[-1][1][0:2]
default_keydefs = idleConf.GetCurrentKeySet()
del sys
| gpl-2.0 |
campbe13/openhatch | vendor/packages/django-tastypie/tastypie/paginator.py | 46 | 7203 | from __future__ import unicode_literals
from django.conf import settings
from django.utils import six
from tastypie.exceptions import BadRequest
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
class Paginator(object):
"""
Limits result sets down to sane amounts for passing to the client.
This is used in place of Django's ``Paginator`` due to the way pagination
works. ``limit`` & ``offset`` (tastypie) are used in place of ``page``
(Django) so none of the page-related calculations are necessary.
This implementation also provides additional details like the
``total_count`` of resources seen and convenience links to the
``previous``/``next`` pages of data as available.
"""
def __init__(self, request_data, objects, resource_uri=None, limit=None, offset=0, max_limit=1000, collection_name='objects'):
"""
Instantiates the ``Paginator`` and allows for some configuration.
The ``request_data`` argument ought to be a dictionary-like object.
May provide ``limit`` and/or ``offset`` to override the defaults.
Commonly provided ``request.GET``. Required.
The ``objects`` should be a list-like object of ``Resources``.
This is typically a ``QuerySet`` but can be anything that
implements slicing. Required.
Optionally accepts a ``limit`` argument, which specifies how many
items to show at a time. Defaults to ``None``, which is no limit.
Optionally accepts an ``offset`` argument, which specifies where in
the ``objects`` to start displaying results from. Defaults to 0.
        Optionally accepts a ``max_limit`` argument, which is the upper bound
limit. Defaults to ``1000``. If you set it to 0 or ``None``, no upper
bound will be enforced.
"""
self.request_data = request_data
self.objects = objects
self.limit = limit
self.max_limit = max_limit
self.offset = offset
self.resource_uri = resource_uri
self.collection_name = collection_name
def get_limit(self):
"""
Determines the proper maximum number of results to return.
In order of importance, it will use:
* The user-requested ``limit`` from the GET parameters, if specified.
* The object-level ``limit`` if specified.
* ``settings.API_LIMIT_PER_PAGE`` if specified.
Default is 20 per page.
"""
limit = self.request_data.get('limit', self.limit)
if limit is None:
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
try:
limit = int(limit)
except ValueError:
raise BadRequest("Invalid limit '%s' provided. Please provide a positive integer." % limit)
if limit < 0:
raise BadRequest("Invalid limit '%s' provided. Please provide a positive integer >= 0." % limit)
if self.max_limit and (not limit or limit > self.max_limit):
# If it's more than the max, we're only going to return the max.
# This is to prevent excessive DB (or other) load.
return self.max_limit
return limit
def get_offset(self):
"""
Determines the proper starting offset of results to return.
It attempts to use the user-provided ``offset`` from the GET parameters,
if specified. Otherwise, it falls back to the object-level ``offset``.
Default is 0.
"""
offset = self.offset
if 'offset' in self.request_data:
offset = self.request_data['offset']
try:
offset = int(offset)
except ValueError:
raise BadRequest("Invalid offset '%s' provided. Please provide an integer." % offset)
if offset < 0:
raise BadRequest("Invalid offset '%s' provided. Please provide a positive integer >= 0." % offset)
return offset
def get_slice(self, limit, offset):
"""
Slices the result set to the specified ``limit`` & ``offset``.
"""
if limit == 0:
return self.objects[offset:]
return self.objects[offset:offset + limit]
def get_count(self):
"""
Returns a count of the total number of objects seen.
"""
try:
return self.objects.count()
except (AttributeError, TypeError):
# If it's not a QuerySet (or it's ilk), fallback to ``len``.
return len(self.objects)
def get_previous(self, limit, offset):
"""
If a previous page is available, will generate a URL to request that
page. If not available, this returns ``None``.
"""
if offset - limit < 0:
return None
return self._generate_uri(limit, offset-limit)
def get_next(self, limit, offset, count):
"""
If a next page is available, will generate a URL to request that
page. If not available, this returns ``None``.
"""
if offset + limit >= count:
return None
return self._generate_uri(limit, offset+limit)
def _generate_uri(self, limit, offset):
if self.resource_uri is None:
return None
try:
# QueryDict has a urlencode method that can handle multiple values for the same key
request_params = self.request_data.copy()
if 'limit' in request_params:
del request_params['limit']
if 'offset' in request_params:
del request_params['offset']
request_params.update({'limit': limit, 'offset': offset})
encoded_params = request_params.urlencode()
except AttributeError:
request_params = {}
for k, v in self.request_data.items():
if isinstance(v, six.text_type):
request_params[k] = v.encode('utf-8')
else:
request_params[k] = v
if 'limit' in request_params:
del request_params['limit']
if 'offset' in request_params:
del request_params['offset']
request_params.update({'limit': limit, 'offset': offset})
encoded_params = urlencode(request_params)
return '%s?%s' % (
self.resource_uri,
encoded_params
)
def page(self):
"""
Generates all pertinent data about the requested page.
Handles getting the correct ``limit`` & ``offset``, then slices off
the correct set of results and returns all pertinent metadata.
"""
limit = self.get_limit()
offset = self.get_offset()
count = self.get_count()
objects = self.get_slice(limit, offset)
meta = {
'offset': offset,
'limit': limit,
'total_count': count,
}
if limit:
meta['previous'] = self.get_previous(limit, offset)
meta['next'] = self.get_next(limit, offset, count)
return {
self.collection_name: objects,
'meta': meta,
}
| agpl-3.0 |
tizz98/lightblue-0.4 | src/series60/_obexcommon.py | 130 | 18262 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
import types
import _lightbluecommon
__all__ = ('OBEXResponse', 'OBEXError',
'CONTINUE', 'OK', 'CREATED', 'ACCEPTED', 'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT', 'RESET_CONTENT', 'PARTIAL_CONTENT',
'MULTIPLE_CHOICES', 'MOVED_PERMANENTLY', 'MOVED_TEMPORARILY', 'SEE_OTHER',
'NOT_MODIFIED', 'USE_PROXY',
'BAD_REQUEST', 'UNAUTHORIZED', 'PAYMENT_REQUIRED', 'FORBIDDEN',
'NOT_FOUND', 'METHOD_NOT_ALLOWED', 'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED', 'REQUEST_TIME_OUT', 'CONFLICT', 'GONE',
'LENGTH_REQUIRED', 'PRECONDITION_FAILED', 'REQUESTED_ENTITY_TOO_LARGE',
'REQUEST_URL_TOO_LARGE', 'UNSUPPORTED_MEDIA_TYPE',
'INTERNAL_SERVER_ERROR', 'NOT_IMPLEMENTED', 'BAD_GATEWAY',
'SERVICE_UNAVAILABLE', 'GATEWAY_TIMEOUT', 'HTTP_VERSION_NOT_SUPPORTED',
'DATABASE_FULL', 'DATABASE_LOCKED')
class OBEXError(_lightbluecommon.BluetoothError):
"""
Generic exception raised for OBEX-related errors.
"""
pass
class OBEXResponse:
"""
Contains the OBEX response received from an OBEX server.
When an OBEX client sends a request, the OBEX server sends back a response
code (to indicate whether the request was successful) and a set of response
headers (to provide other useful information).
For example, if a client sends a 'Get' request to retrieve a file, the
client might get a response like this:
>>> import lightblue
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> response = client.get({"name": "file.txt"}, file("file.txt", "w"))
>>> print response
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={'length': 35288}>
You can get the response code and response headers in different formats:
>>> print response.reason
'OK' # a string description of the response code
>>> print response.code
32 # the response code (e.g. this is 0x20)
>>> print response.headers
{'length': 35288} # the headers, with string keys
>>> print response.rawheaders
{195: 35288} # the headers, with raw header ID keys
>>>
Note how the 'code' attribute does not have the final bit set - e.g. for
OK/Success, the response code is 0x20, not 0xA0.
The lightblue.obex module defines constants for response code values (e.g.
lightblue.obex.OK, lightblue.obex.FORBIDDEN, etc.).
"""
def __init__(self, code, rawheaders):
self.__code = code
self.__reason = _OBEX_RESPONSES.get(code, "Unknown response code")
self.__rawheaders = rawheaders
self.__headers = None
code = property(lambda self: self.__code,
doc='The response code, without the final bit set.')
reason = property(lambda self: self.__reason,
doc='A string description of the response code.')
rawheaders = property(lambda self: self.__rawheaders,
doc='The response headers, as a dictionary with header ID (unsigned byte) keys.')
def getheader(self, header, default=None):
'''
Returns the response header value for the given header, which may
either be a string (not case-sensitive) or the raw byte
value of the header ID.
Returns the specified default value if the header is not present.
'''
if isinstance(header, types.StringTypes):
return self.headers.get(header.lower(), default)
return self.__rawheaders.get(header, default)
def __getheaders(self):
if self.__headers is None:
self.__headers = {}
for headerid, value in self.__rawheaders.items():
if headerid in _HEADER_IDS_TO_STRINGS:
self.__headers[_HEADER_IDS_TO_STRINGS[headerid]] = value
else:
self.__headers["0x%02x" % headerid] = value
return self.__headers
headers = property(__getheaders,
doc='The response headers, as a dictionary with string keys.')
def __repr__(self):
return "<OBEXResponse reason='%s' code=0x%02x (0x%02x) headers=%s>" % \
(self.__reason, self.__code, (self.__code | 0x80), str(self.headers))
try:
import datetime
# as from python docs example
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
except:
pass # no datetime on pys60
_LOCAL_TIME_FORMAT = "%Y%m%dT%H%M%S"
_UTC_TIME_FORMAT = _LOCAL_TIME_FORMAT + "Z"
def _datetimefromstring(s):
import time
if s[-1:] == "Z":
# add UTC() instance as tzinfo
args = (time.strptime(s, _UTC_TIME_FORMAT)[0:6]) + (0, UTC())
return datetime.datetime(*args)
else:
return datetime.datetime(*(time.strptime(s, _LOCAL_TIME_FORMAT)[0:6]))
_HEADER_STRINGS_TO_IDS = {
"count": 0xc0,
"name": 0x01,
"type": 0x42,
"length": 0xc3,
"time": 0x44,
"description": 0x05,
"target": 0x46,
"http": 0x47,
"who": 0x4a,
"connection-id": 0xcb,
"application-parameters": 0x4c,
"authentication-challenge": 0x4d,
"authentication-response": 0x4e,
"creator-id": 0xcf,
"wan-uuid": 0x50,
"object-class": 0x51,
"session-parameters": 0x52,
"session-sequence-number": 0x93
}
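# Build the reverse mapping (header ID byte -> header name) and check for collisions.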
_HEADER_IDS_TO_STRINGS = {}
for key, value in _HEADER_STRINGS_TO_IDS.items():
_HEADER_IDS_TO_STRINGS[value] = key
assert len(_HEADER_IDS_TO_STRINGS) == len(_HEADER_STRINGS_TO_IDS)
# These match the associated strings in httplib.responses, since OBEX response
# codes are matched to HTTP status codes (except for 0x60 and 0x61).
# Note these are the responses *without* the final bit set.
_OBEX_RESPONSES = {
0x10: "Continue",
0x20: "OK",
0x21: "Created",
0x22: "Accepted",
0x23: "Non-Authoritative Information",
0x24: "No Content",
0x25: "Reset Content",
0x26: "Partial Content",
0x30: "Multiple Choices",
0x31: "Moved Permanently",
0x32: "Moved Temporarily", # but is 'Found' (302) in httplib.response???
0x33: "See Other",
0x34: "Not Modified",
0x35: "Use Proxy",
0x40: "Bad Request",
0x41: "Unauthorized",
0x42: "Payment Required",
0x43: "Forbidden",
0x44: "Not Found",
0x45: "Method Not Allowed",
0x46: "Not Acceptable",
0x47: "Proxy Authentication Required",
0x48: "Request Timeout",
0x49: "Conflict",
0x4A: "Gone",
    0x4B: "Length Required",
0x4C: "Precondition Failed",
0x4D: "Request Entity Too Large",
0x4E: "Request-URI Too Long",
0x4F: "Unsupported Media Type",
0x50: "Internal Server Error",
0x51: "Not Implemented",
0x52: "Bad Gateway",
0x53: "Service Unavailable",
0x54: "Gateway Timeout",
0x55: "HTTP Version Not Supported",
0x60: "Database Full",
0x61: "Database Locked"
}
_obexclientclassdoc = \
"""
An OBEX client class. (Note this is not available on Python for Series 60.)
For example, to connect to an OBEX server and send a file:
>>> import lightblue
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.put({"name": "photo.jpg"}, file("photo.jpg", "rb"))
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.disconnect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
A client must call connect() to establish a connection before it can send
any other requests.
The connect(), disconnect(), put(), delete(), get() and setpath() methods
all accept the request headers as a dictionary of header-value mappings. The
request headers are used to provide the server with additional information
for the request. For example, this sends a Put request that includes Name,
Type and Length headers in the request headers, to provide details about
the transferred file:
>>> f = file("file.txt")
>>> client.put({"name": "file.txt", "type": "text/plain",
... "length": 5192}, f)
>>>
Here is a list of all the different string header keys that you can use in
the request headers, and the expected type of the value for each header:
- "name" -> a string
- "type" -> a string
- "length" -> an int
- "time" -> a datetime object from the datetime module
- "description" -> a string
- "target" -> a string or buffer
- "http" -> a string or buffer
- "who" -> a string or buffer
- "connection-id" -> an int
- "application-parameters" -> a string or buffer
- "authentication-challenge" -> a string or buffer
- "authentication-response" -> a string or buffer
- "creator-id" -> an int
- "wan-uuid" -> a string or buffer
- "object-class" -> a string or buffer
- "session-parameters" -> a string or buffer
- "session-sequence-number" -> an int less than 256
(The string header keys are not case-sensitive.)
Alternatively, you can use raw header ID values instead of the above
convenience strings. So, the previous example can be rewritten as:
>>> client.put({0x01: "file.txt", 0x42: "text/plain", 0xC3: 5192},
... fileobject)
>>>
This is also useful for inserting custom headers. For example, a PutImage
request for a Basic Imaging client requires the Img-Descriptor (0x71)
header:
>>> client.put({"type": "x-bt/img-img",
... "name": "photo.jpg",
... 0x71: '<image-descriptor version="1.0"><image encoding="JPEG" pixel="160*120" size="37600"/></image-descriptor>'},
... file('photo.jpg', 'rb'))
>>>
Notice that the connection-id header is not sent, because this is
automatically included by OBEXClient in the request headers if a
connection-id was received in a previous Connect response.
See the included src/examples/obex_ftp_client.py for an example of using
OBEXClient to implement a File Transfer client for browsing the files on a
remote device.
"""
_obexclientdocs = {
"__init__":
"""
Creates an OBEX client.
Arguments:
- address: the address of the remote device
- channel: the RFCOMM channel of the remote OBEX service
""",
"connect":
"""
Establishes the Bluetooth connection to the remote OBEX server and sends
a Connect request to open the OBEX session. Returns an OBEXResponse
instance containing the server response.
Raises lightblue.obex.OBEXError if the session is already connected, or if
an error occurs during the request.
If the server refuses the Connect request (i.e. if it sends a response code
other than OK/Success), the Bluetooth connection will be closed.
Arguments:
- headers={}: the headers to send for the Connect request
""",
"disconnect":
"""
Sends a Disconnect request to end the OBEX session and closes the Bluetooth
connection to the remote OBEX server. Returns an OBEXResponse
instance containing the server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers={}: the headers to send for the request
""",
"put":
"""
Sends a Put request. Returns an OBEXResponse instance containing the
server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request
- fileobj: a file-like object containing the file data to be sent for
the request
For example, to send a file named 'photo.jpg', using the request headers
to notify the server of the file's name, MIME type and length:
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.put({"name": "photo.jpg", "type": "image/jpeg",
"length": 28566}, file("photo.jpg", "rb"))
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
""",
"delete":
"""
Sends a Put-Delete request in order to delete a file or folder on the remote
server. Returns an OBEXResponse instance containing the server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request - you should use the
'name' header to specify the file you want to delete
If the file on the server can't be deleted because it's a read-only file,
you might get an 'Unauthorized' response, like this:
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> client.delete({"name": "random_file.txt"})
<OBEXResponse reason='Unauthorized' code=0x41 (0xc1) headers={}>
>>>
""",
"get":
"""
Sends a Get request. Returns an OBEXResponse instance containing the server
response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request - you should use these
to specify the file you want to retrieve
- fileobj: a file-like object, to which the received data will be
written
An example:
>>> client = lightblue.obex.OBEXClient("aa:bb:cc:dd:ee:ff", 10)
>>> client.connect()
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>> f = file("received_file.txt", "w+")
>>> client.get({"name": "testfile.txt"}, f)
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={'length':9}>
>>> f.seek(0)
>>> f.read()
'test file'
>>>
""",
"setpath":
"""
Sends a SetPath request in order to set the "current path" on the remote
server for file transfers. Returns an OBEXResponse instance containing the
server response.
Raises lightblue.obex.OBEXError if connect() has not been called, or if an
error occurs during the request.
Note that you don't need to send any connection-id headers - this is
automatically included if the client received one in a Connect response.
Arguments:
- headers: the headers to send for the request - you should use the
'name' header to specify the directory you want to change to
- cdtoparent=False: True if the remote server should move up one
directory before applying the specified directory (i.e. 'cd
../dirname')
- createdirs=False: True if the specified directory should be created
if it doesn't exist (if False, the server will return an error
response if the directory doesn't exist)
For example:
# change to the "images" subdirectory
>>> client.setpath({"name": "images"})
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
# change to the parent directory
>>> client.setpath({}, cdtoparent=True)
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
# create a subdirectory "My_Files"
>>> client.setpath({"name": "My_Files"}, createdirs=True)
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
# change to the root directory - you can use an empty "name" header
# to specify this
>>> client.setpath({"name": ""})
<OBEXResponse reason='OK' code=0x20 (0xa0) headers={}>
>>>
"""
}
# response constants
CONTINUE = 0x10
OK = 0x20
CREATED = 0x21
ACCEPTED = 0x22
NON_AUTHORITATIVE_INFORMATION = 0x23
NO_CONTENT = 0x24
RESET_CONTENT = 0x25
PARTIAL_CONTENT = 0x26
MULTIPLE_CHOICES = 0x30
MOVED_PERMANENTLY = 0x31
MOVED_TEMPORARILY = 0x32
SEE_OTHER = 0x33
NOT_MODIFIED = 0x34
USE_PROXY = 0x35
BAD_REQUEST = 0x40
UNAUTHORIZED = 0x41
PAYMENT_REQUIRED = 0x42
FORBIDDEN = 0x43
NOT_FOUND = 0x44
METHOD_NOT_ALLOWED = 0x45
NOT_ACCEPTABLE = 0x46
PROXY_AUTHENTICATION_REQUIRED = 0x47
REQUEST_TIME_OUT = 0x48
CONFLICT = 0x49
GONE = 0x4A
LENGTH_REQUIRED = 0x4B
PRECONDITION_FAILED = 0x4C
REQUESTED_ENTITY_TOO_LARGE = 0x4D
REQUEST_URL_TOO_LARGE = 0x4E
UNSUPPORTED_MEDIA_TYPE = 0x4F
INTERNAL_SERVER_ERROR = 0x50
NOT_IMPLEMENTED = 0x51
BAD_GATEWAY = 0x52
SERVICE_UNAVAILABLE = 0x53
GATEWAY_TIMEOUT = 0x54
HTTP_VERSION_NOT_SUPPORTED = 0x55
DATABASE_FULL = 0x60
DATABASE_LOCKED = 0x61
| gpl-3.0 |
anaselli/libyui | libyui-bindings/swig/python/examples/widgets.py | 5 | 10905 | #coding:utf-8
#############################################################################
#
# widgets.py - Demonstration of widgets available in python-libyui
#
# License: GPLv2
# Author: Jan-Simon Möller, [email protected]
#############################################################################
# ensure we're using the latest build, if called from our build environment
import sys
sys.path.insert(0,'../../../build/swig/python')
###########
# imports #
###########
import yui
import locale
####################################
# LOCALE (important for TERMINAL!) #
####################################
# set the locale to de/utf-8
locale.setlocale(locale.LC_ALL, "")
log = yui.YUILog.instance()
log.setLogFileName("debug.log")
log.enableDebugLogging( True )
appl = yui.YUI.application()
appl.setLanguage( "de", "UTF-8" )
#appl.setConsoleFont(magic, font, screenMap, unicodeMap, language)
# see /usr/share/YaST2/data/consolefonts.ycp
appl.setConsoleFont("(K", "lat9w-16.psfu", "trivial", "", "en_US.UTF-8")
#################
# class widgets #
#################
class WIDGETS(object):
"""
Main class for the 'widgets' demo
"""
def __init__(self, myavwidgets):
"""
Init/Constructor for the 'widgets' demo
"""
self.factory = yui.YUI.widgetFactory()
self.dialog = self.factory.createMainDialog()
self.avwidgets = myavwidgets.copy()
# create the main gui
# +---+-----------+
# | s | display |
# | e +-----------+
# | l |description|
# | b +-----------+
# | x | code |
# +---+-----------+
self.mainhbox = self.factory.createHBox(self.dialog)
self.mainvbox = self.factory.createVBox(self.mainhbox)
self.mainvbox.setWeight(0,20)
self.selbox = self.factory.createSelectionBox(self.mainvbox, "Widgets")
self.selbox.setNotify()
self.closebutton = self.factory.createPushButton(self.mainvbox, "&Close")
self.boxright = self.factory.createVBox(self.mainhbox)
self.boxright.setWeight(0,80)
self.framedisplay = self.factory.createFrame(self.boxright, "View")
self.framedisplay.setWeight(1,33)
self.framedisplay.setStretchable(0,True)
self.framedisplayminsize = self.factory.createMinSize(self.framedisplay, 5, 5)
self.display = self.factory.createReplacePoint(self.framedisplayminsize) # here we change the widget
self.displaychild_ = {}
self.framedescription = self.factory.createFrame(self.boxright, "Description")
self.framedescription.setWeight(1,33)
self.framedescription.setStretchable(0,True)
self.description = self.factory.createReplacePoint(self.framedescription) # here we change the widget
self.descriptionchild_ = {}
self.framecode = self.factory.createFrame(self.boxright, "Code")
self.framecode.setWeight(1,33)
self.framecode.setStretchable(0,True)
self.code = self.factory.createReplacePoint(self.framecode) # here we change the widget
self.codechild_ = {}
self.updateselbox() # import available widgets into display
self.updatedisplay()
self.updatedescription()
self.updatecode()
def updateselbox(self):
for i in self.avwidgets.keys():
self.selbox.addItem(i)
def updatedisplay(self):
self.display.deleteChildren() # remove old widgets
selected = self.selbox.selectedItem().label()
#self.displaychild_[selected] = self.factory.createPushButton(self.display, "&OK") #self.avwidgets[selected][0]
#print type(self.displaychild_[selected])
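        # Build the factory call for the selected widget from its spec string and run it with exec (Python 2)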
widgettodisplay = "self.displaychild_[selected] = self.factory."+self.avwidgets[selected][0]
exec widgettodisplay
if self.avwidgets[selected][1]:
widgettodisplay1 = "self.displaychild_[selected]"+self.avwidgets[selected][1]
exec widgettodisplay1
if self.avwidgets[selected][2]:
widgettodisplay2 = "self.displaychild_[selected]"+self.avwidgets[selected][2]
exec widgettodisplay2
self.dialog.recalcLayout()
self.display.showChild()
def updatedescription(self):
self.description.deleteChildren()
selected = self.selbox.selectedItem().label()
text = self.avwidgets[selected][3]
self.descriptionchild_[selected] = self.factory.createRichText(self.description, str(text))
#exec widgettodescribe
self.dialog.recalcLayout()
self.description.showChild()
def updatecode(self):
self.code.deleteChildren()
selected = self.selbox.selectedItem().label()
text = self.avwidgets[selected][4]
print text
self.codechild_[selected] = self.factory.createRichText(self.code, str(text))
self.dialog.recalcLayout()
self.code.showChild()
def handleevent(self):
"""
Event-handler for the 'widgets' demo
"""
while True:
event = self.dialog.waitForEvent()
if event.eventType() == yui.YEvent.CancelEvent:
self.dialog.destroy()
break
if event.widget() == self.closebutton:
self.dialog.destroy()
break
if event.widget() == self.selbox:
self.dialog.startMultipleChanges()
self.updatedisplay()
self.updatedescription()
self.updatecode()
self.dialog.doneMultipleChanges()
if __name__ == "__main__":
avwidgets = {}
avwidgets["PushButton"]=['createPushButton(self.display, "&OK")',
None,
None,
"""This Widget is a Button with a name and a configurable shortcut""",
"""Code:<br>myPushButton = factory.createPushButton(parentWidget, Name) <br>e.g. myPushButton = f.createPushButton(myHBox, "&OK")"""]
avwidgets["ComboBox"] =['createComboBox(self.display, "Combobox")',
'.addItem("Item1")' ,
'.addItem("Item2")' ,
"""This Widget is a Combobox with 1 or more items.""",
"""Code: <br>
myComboBox = factory.createComboBox(parentWidget, "Name") <br>
myComboBox.addItem("Item") <br>
Event: <br>
if event.widget() == myComboBox: <br>
dosomething() """]
avwidgets["InputField"]=['createInputField(self.display, "Inputfield")',
'.setValue("Input nonsense here")',
None,
"""This Widget is an InputField for User-Input""",
"""Code:<br>
myInputField = factory.createInputField(parentWidget, "Name") <br>
myInputField.setValue("Insert valid input here") <br>
myInputField.setValidChars("abcdefghijklmnopqrstuvwxyz") <br>
Event: <br>
if event.widget() = myInputField: <br>
value = myInputField.value()
"""]
avwidgets["CheckBox"] =['createCheckBox(self.display, "Checkbox")',
'.setChecked(True)',
None,
"""This Widget is a Checkbox""",
"""Code:<br>
myCheckBox = fatory.createCheckbox(parentWidget, "Name") <br>
myCheckbox.setEnabled(True) <br>
Event: <br>
if event.widget() == myCheckbox: <br>
if myCheckbox.isChecked(): <br>
print "Box is checked"
"""]
avwidgets["Frame"] =['createFrame(self.display, "Frame")',
".setStretchable(0,True)",
".setStretchable(1,True)",
"""This Widget is a Frame. It can hold other widgets (vbox,hbox,single widget).""",
"""Code:<br>
myFrame = factory.createFrame(parentWidget, "Name") <br>
"""]
avwidgets["Label"] =['createLabel(self.display, "Label")',
None,
None,
"""This Widget is a Label""",
"""Code: <br>
myLabel = factory.createLabel(parentWidget, "LabelText") <br>
"""]
avwidgets["LogView"] =['createLogView(self.display, "LogView", 10, 10)',
'.appendLines("Logtext1 ")',
'.appendLines("Logtext2 ")',
"""This Widget is a Log-window.""",
"""Code:<br>
myLogView = factory.createLogView(parentWidget, "Name", nrLinesShown, nrLinesCached)<br>
myLogView.appendLines("Logtext1")
"""] # can't use \n in Logtext1 ... need to check
avwidgets["ProgressBar"]=['createProgressBar(self.display, "ProgressBar", 100)',
'.setValue(10)',
None,
"""This Widget is a ProgressBar.""",
"""Code:<br>
myProgressBar = factory.createProgressBar(parentWidget, "Name", maxpercentvalue) <br>
e.g.: <br>
myProgressBar = factory.createProgressBar(dialog, "Progress", 100") <br>
myProgressBar.setValue(33)
"""]
avwidgets["SelectionBox"]=['createSelectionBox(self.display, "Selection")',
'.addItem("SELBOX_item1")',
'.addItem("SELBOX_item2")',
"""This Widget is a SelectionBox""",
"""Code:<br>
mySelectionBox = factory.createSelectionBox(parentWidget, "Name") <br>
mySelectionBox.addItem("Item1") <br>
Event:<br>
                               if event.widget() == mySelectionBox: <br>
selected = mySelectionBox.selectedItem()
"""]
MY_MAIN_GUI = WIDGETS(avwidgets)
MY_MAIN_GUI.handleevent()
| lgpl-3.0 |
Arable/evepod | lib/python2.7/site-packages/newrelic-2.12.0.10/newrelic/core/function_node.py | 2 | 1687 | from collections import namedtuple
import newrelic.core.trace_node
from newrelic.core.metric import TimeMetric
_FunctionNode = namedtuple('_FunctionNode',
['group', 'name', 'children', 'start_time', 'end_time',
'duration', 'exclusive', 'label', 'params'])
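# FunctionNode layers time-metric and trace-node generation on top of the immutable namedtuple fields.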
class FunctionNode(_FunctionNode):
def time_metrics(self, stats, root, parent):
"""Return a generator yielding the timed metrics for this
function node as well as all the child nodes.
"""
name = '%s/%s' % (self.group, self.name)
yield TimeMetric(name=name, scope='', duration=self.duration,
exclusive=self.exclusive)
yield TimeMetric(name=name, scope=root.path,
duration=self.duration, exclusive=self.exclusive)
# Now for the children.
for child in self.children:
for metric in child.time_metrics(stats, root, self):
yield metric
def trace_node(self, stats, root):
name = '%s/%s' % (self.group, self.name)
name = root.string_table.cache(name)
start_time = newrelic.core.trace_node.node_start_time(root, self)
end_time = newrelic.core.trace_node.node_end_time(root, self)
root.trace_node_count += 1
children = []
for child in self.children:
if root.trace_node_count > root.trace_node_limit:
break
children.append(child.trace_node(stats, root))
params = self.params or None
return newrelic.core.trace_node.TraceNode(start_time=start_time,
end_time=end_time, name=name, params=params, children=children,
label=self.label)
| apache-2.0 |
kisel/trex-core | scripts/automation/trex_control_plane/server/outer_packages.py | 1 | 1560 | #!/router/bin/python
import sys
import os
python_ver = 'python%s' % sys.version_info.major
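# Pick the pyzmq build matching this interpreter's unicode width (UCS-2 vs UCS-4).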
ucs_ver = 'ucs2' if sys.maxunicode == 65535 else 'ucs4'
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
ZMQ_PATH = os.path.abspath(os.path.join(PATH_TO_PYTHON_LIB, 'pyzmq-14.5.0', python_ver, ucs_ver, '64bit'))
YAML_PATH = os.path.abspath(os.path.join(PATH_TO_PYTHON_LIB, 'pyyaml-3.11', python_ver))
SERVER_MODULES = [
'simple_enum',
'zmq',
'jsonrpclib-pelix-0.2.5',
'python-daemon-2.0.5',
'lockfile-0.10.2',
'termstyle'
]
def import_server_modules():
# must be in a higher priority
if PATH_TO_PYTHON_LIB not in sys.path:
sys.path.insert(0, PATH_TO_PYTHON_LIB)
for path in (ROOT_PATH, ZMQ_PATH, YAML_PATH):
if path not in sys.path:
sys.path.insert(0, path)
import_module_list(SERVER_MODULES)
def import_module_list(modules_list):
assert(isinstance(modules_list, list))
for p in modules_list:
full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
fix_path = os.path.normcase(full_path)
if full_path not in sys.path:
sys.path.insert(1, full_path)
import_server_modules()
| apache-2.0 |
jtrobec/pants | src/python/pants/task/changed_file_task_mixin.py | 2 | 6080 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
from pants.base.build_environment import get_scm
from pants.base.exceptions import TaskError
from pants.build_graph.source_mapper import SpecSourceMapper
from pants.goal.workspace import ScmWorkspace
class ChangeCalculator(object):
"""A utility for calculating changed files or changed target addresses."""
def __init__(self,
scm,
workspace,
address_mapper,
build_graph,
include_dependees,
fast=False,
changes_since=None,
diffspec=None,
exclude_target_regexp=None,
spec_excludes=None):
self._scm = scm
self._workspace = workspace
self._address_mapper = address_mapper
self._build_graph = build_graph
self._include_dependees = include_dependees
self._fast = fast
self._changes_since = changes_since
self._diffspec = diffspec
self._exclude_target_regexp = exclude_target_regexp
self._spec_excludes = spec_excludes
self._mapper_cache = None
@property
def _mapper(self):
if self._mapper_cache is None:
self._mapper_cache = SpecSourceMapper(self._address_mapper, self._build_graph, self._fast)
return self._mapper_cache
def changed_files(self):
"""Determines the files changed according to SCM/workspace and options."""
if self._diffspec:
return self._workspace.changes_in(self._diffspec)
else:
since = self._changes_since or self._scm.current_rev_identifier()
return self._workspace.touched_files(since)
def _directly_changed_targets(self):
# Internal helper to find target addresses containing SCM changes.
targets_for_source = self._mapper.target_addresses_for_source
result = set()
for src in self.changed_files():
result.update(set(targets_for_source(src)))
return result
def _find_changed_targets(self):
# Internal helper to find changed targets, optionally including their dependees.
changed = self._directly_changed_targets()
# Skip loading the graph or doing any further work if no directly changed targets found.
if not changed:
return changed
if self._include_dependees == 'none':
return changed
# Load the whole build graph since we need it for dependee finding in either remaining case.
for address in self._address_mapper.scan_addresses(spec_excludes=self._spec_excludes):
self._build_graph.inject_address_closure(address)
if self._include_dependees == 'direct':
return changed.union(*[self._build_graph.dependents_of(addr) for addr in changed])
if self._include_dependees == 'transitive':
return set(t.address for t in self._build_graph.transitive_dependees_of_addresses(changed))
# Should never get here.
raise ValueError('Unknown dependee inclusion: "{}"'.format(self._include_dependees))
def changed_target_addresses(self):
"""Find changed targets, according to SCM.
This is the intended entry point for finding changed targets unless callers have a specific
reason to call one of the above internal helpers. It will find changed targets and:
- Optionally find changes in a given diffspec (commit, branch, tag, range, etc).
- Optionally include direct or transitive dependees.
- Optionally filter targets matching exclude_target_regexp.
:returns: A set of target addresses.
"""
# Find changed targets (and maybe their dependees).
changed = self._find_changed_targets()
# Remove any that match the exclude_target_regexp list.
excludes = [re.compile(pattern) for pattern in self._exclude_target_regexp]
return set([
t for t in changed if not any(exclude.search(t.spec) is not None for exclude in excludes)
])
class ChangedFileTaskMixin(object):
"""A mixin for tasks which require the set of targets (or files) changed according to SCM.
Changes are calculated relative to a ref/tree-ish (defaults to HEAD), and changed files are then
mapped to targets using LazySourceMapper. LazySourceMapper can optionally be used in "fast" mode,
  which stops searching for additional owners for a given source once one is found.
"""
@classmethod
def register_change_file_options(cls, register):
register('--fast', action='store_true', default=False,
             help='Stop searching for owners once a source is mapped to at least one owning target.')
register('--changes-since', '--parent',
help='Calculate changes since this tree-ish/scm ref (defaults to current HEAD/tip).')
register('--diffspec',
help='Calculate changes contained within given scm spec (commit range/sha/ref/etc).')
register('--include-dependees', choices=['none', 'direct', 'transitive'], default='none',
help='Include direct or transitive dependees of changed targets.')
@classmethod
def change_calculator(cls, options, address_mapper, build_graph, scm=None, workspace=None, spec_excludes=None):
scm = scm or get_scm()
if scm is None:
raise TaskError('No SCM available.')
workspace = workspace or ScmWorkspace(scm)
return ChangeCalculator(scm,
workspace,
address_mapper,
build_graph,
options.include_dependees,
fast=options.fast,
changes_since=options.changes_since,
diffspec=options.diffspec,
# NB: exclude_target_regexp is a global scope option registered
# elsewhere
exclude_target_regexp=options.exclude_target_regexp,
spec_excludes=spec_excludes)
| apache-2.0 |
pkill-nine/qutebrowser | tests/unit/commands/test_cmdutils.py | 2 | 15695 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=unused-variable
"""Tests for qutebrowser.commands.cmdutils."""
import sys
import logging
import types
import pytest
from qutebrowser.commands import cmdutils, cmdexc, argparser, command
from qutebrowser.utils import usertypes, typing
@pytest.fixture(autouse=True)
def clear_globals(monkeypatch):
"""Clear the cmdutils globals between each test."""
monkeypatch.setattr(cmdutils, 'cmd_dict', {})
monkeypatch.setattr(cmdutils, 'aliases', [])
def _get_cmd(*args, **kwargs):
"""Get a command object created via @cmdutils.register.
Args:
Passed to @cmdutils.register decorator
"""
@cmdutils.register(*args, **kwargs)
def fun():
"""Blah."""
pass
return cmdutils.cmd_dict['fun']
class TestCheckOverflow:
def test_good(self):
cmdutils.check_overflow(1, 'int')
def test_bad(self):
int32_max = 2 ** 31 - 1
with pytest.raises(cmdexc.CommandError, match="Numeric argument is "
"too large for internal int representation."):
cmdutils.check_overflow(int32_max + 1, 'int')
class TestCheckExclusive:
@pytest.mark.parametrize('flags', [[], [False, True], [False, False]])
def test_good(self, flags):
cmdutils.check_exclusive(flags, [])
def test_bad(self):
with pytest.raises(cmdexc.CommandError,
match="Only one of -x/-y/-z can be given!"):
cmdutils.check_exclusive([True, True], 'xyz')
class TestRegister:
def test_simple(self):
@cmdutils.register()
def fun():
"""Blah."""
pass
cmd = cmdutils.cmd_dict['fun']
assert cmd.handler is fun
assert cmd.name == 'fun'
assert len(cmdutils.cmd_dict) == 1
assert not cmdutils.aliases
def test_underlines(self):
"""Make sure the function name is normalized correctly (_ -> -)."""
@cmdutils.register()
def eggs_bacon():
"""Blah."""
pass
assert cmdutils.cmd_dict['eggs-bacon'].name == 'eggs-bacon'
assert 'eggs_bacon' not in cmdutils.cmd_dict
def test_lowercasing(self):
"""Make sure the function name is normalized correctly (uppercase)."""
@cmdutils.register()
def Test(): # pylint: disable=invalid-name
"""Blah."""
pass
assert cmdutils.cmd_dict['test'].name == 'test'
assert 'Test' not in cmdutils.cmd_dict
def test_explicit_name(self):
"""Test register with explicit name."""
@cmdutils.register(name='foobar')
def fun():
"""Blah."""
pass
assert cmdutils.cmd_dict['foobar'].name == 'foobar'
assert 'fun' not in cmdutils.cmd_dict
assert len(cmdutils.cmd_dict) == 1
assert not cmdutils.aliases
def test_multiple_names(self):
"""Test register with name being a list."""
@cmdutils.register(name=['foobar', 'blub'])
def fun():
"""Blah."""
pass
assert cmdutils.cmd_dict['foobar'].name == 'foobar'
assert cmdutils.cmd_dict['blub'].name == 'foobar'
assert 'fun' not in cmdutils.cmd_dict
assert len(cmdutils.cmd_dict) == 2
assert cmdutils.aliases == ['blub']
def test_multiple_registrations(self):
"""Make sure registering the same name twice raises ValueError."""
@cmdutils.register(name=['foobar', 'blub'])
def fun():
"""Blah."""
pass
with pytest.raises(ValueError):
@cmdutils.register(name=['blah', 'blub'])
def fun2():
"""Blah."""
pass
def test_instance(self):
"""Make sure the instance gets passed to Command."""
@cmdutils.register(instance='foobar')
def fun(self):
"""Blah."""
pass
assert cmdutils.cmd_dict['fun']._instance == 'foobar'
def test_kwargs(self):
"""Make sure the other keyword arguments get passed to Command."""
@cmdutils.register(hide=True)
def fun():
"""Blah."""
pass
assert cmdutils.cmd_dict['fun'].hide
def test_star_args(self):
"""Check handling of *args."""
@cmdutils.register()
def fun(*args):
"""Blah."""
pass
with pytest.raises(argparser.ArgumentParserError):
cmdutils.cmd_dict['fun'].parser.parse_args([])
    def test_star_args_optional(self):
        """Check handling of *args with star_args_optional."""
@cmdutils.register(star_args_optional=True)
def fun(*args):
"""Blah."""
assert not args
cmd = cmdutils.cmd_dict['fun']
cmd.namespace = cmd.parser.parse_args([])
args, kwargs = cmd._get_call_args(win_id=0)
fun(*args, **kwargs)
@pytest.mark.parametrize('inp, expected', [
(['--arg'], True), (['-a'], True), ([], False)])
def test_flag(self, inp, expected):
@cmdutils.register()
def fun(arg=False):
"""Blah."""
assert arg == expected
cmd = cmdutils.cmd_dict['fun']
cmd.namespace = cmd.parser.parse_args(inp)
assert cmd.namespace.arg == expected
def test_flag_argument(self):
@cmdutils.register()
@cmdutils.argument('arg', flag='b')
def fun(arg=False):
"""Blah."""
assert arg
cmd = cmdutils.cmd_dict['fun']
with pytest.raises(argparser.ArgumentParserError):
cmd.parser.parse_args(['-a'])
cmd.namespace = cmd.parser.parse_args(['-b'])
assert cmd.namespace.arg
args, kwargs = cmd._get_call_args(win_id=0)
fun(*args, **kwargs)
def test_partial_arg(self):
"""Test with only some arguments decorated with @cmdutils.argument."""
@cmdutils.register()
@cmdutils.argument('arg1', flag='b')
def fun(arg1=False, arg2=False):
"""Blah."""
pass
def test_win_id(self):
@cmdutils.register()
@cmdutils.argument('win_id', win_id=True)
def fun(win_id):
"""Blah."""
pass
assert cmdutils.cmd_dict['fun']._get_call_args(42) == ([42], {})
def test_count(self):
@cmdutils.register()
@cmdutils.argument('count', count=True)
def fun(count=0):
"""Blah."""
pass
assert cmdutils.cmd_dict['fun']._get_call_args(42) == ([0], {})
def test_count_without_default(self):
with pytest.raises(TypeError, match="fun: handler has count parameter "
"without default!"):
@cmdutils.register()
@cmdutils.argument('count', count=True)
def fun(count):
"""Blah."""
pass
@pytest.mark.parametrize('hide', [True, False])
def test_pos_args(self, hide):
@cmdutils.register()
@cmdutils.argument('arg', hide=hide)
def fun(arg):
"""Blah."""
pass
pos_args = cmdutils.cmd_dict['fun'].pos_args
if hide:
assert pos_args == []
else:
assert pos_args == [('arg', 'arg')]
Enum = usertypes.enum('Test', ['x', 'y'])
@pytest.mark.parametrize('typ, inp, choices, expected', [
(int, '42', None, 42),
(int, 'x', None, cmdexc.ArgumentTypeError),
(str, 'foo', None, 'foo'),
(typing.Union[str, int], 'foo', None, 'foo'),
(typing.Union[str, int], '42', None, 42),
# Choices
(str, 'foo', ['foo'], 'foo'),
(str, 'bar', ['foo'], cmdexc.ArgumentTypeError),
# Choices with Union: only checked when it's a str
(typing.Union[str, int], 'foo', ['foo'], 'foo'),
(typing.Union[str, int], 'bar', ['foo'], cmdexc.ArgumentTypeError),
(typing.Union[str, int], '42', ['foo'], 42),
(Enum, 'x', None, Enum.x),
(Enum, 'z', None, cmdexc.ArgumentTypeError),
])
def test_typed_args(self, typ, inp, choices, expected):
@cmdutils.register()
@cmdutils.argument('arg', choices=choices)
def fun(arg: typ):
"""Blah."""
assert arg == expected
cmd = cmdutils.cmd_dict['fun']
cmd.namespace = cmd.parser.parse_args([inp])
if expected is cmdexc.ArgumentTypeError:
with pytest.raises(cmdexc.ArgumentTypeError):
cmd._get_call_args(win_id=0)
else:
args, kwargs = cmd._get_call_args(win_id=0)
assert args == [expected]
assert kwargs == {}
fun(*args, **kwargs)
def test_choices_no_annotation(self):
# https://github.com/qutebrowser/qutebrowser/issues/1871
@cmdutils.register()
@cmdutils.argument('arg', choices=['foo', 'bar'])
def fun(arg):
"""Blah."""
pass
cmd = cmdutils.cmd_dict['fun']
cmd.namespace = cmd.parser.parse_args(['fish'])
with pytest.raises(cmdexc.ArgumentTypeError):
cmd._get_call_args(win_id=0)
def test_choices_no_annotation_kwonly(self):
# https://github.com/qutebrowser/qutebrowser/issues/1871
@cmdutils.register()
@cmdutils.argument('arg', choices=['foo', 'bar'])
def fun(*, arg='foo'):
"""Blah."""
pass
cmd = cmdutils.cmd_dict['fun']
cmd.namespace = cmd.parser.parse_args(['--arg=fish'])
with pytest.raises(cmdexc.ArgumentTypeError):
cmd._get_call_args(win_id=0)
def test_pos_arg_info(self):
@cmdutils.register()
@cmdutils.argument('foo', choices=('a', 'b'))
@cmdutils.argument('bar', choices=('x', 'y'))
@cmdutils.argument('opt')
def fun(foo, bar, opt=False):
"""Blah."""
pass
cmd = cmdutils.cmd_dict['fun']
assert cmd.get_pos_arg_info(0) == command.ArgInfo(choices=('a', 'b'))
assert cmd.get_pos_arg_info(1) == command.ArgInfo(choices=('x', 'y'))
with pytest.raises(IndexError):
cmd.get_pos_arg_info(2)
def test_keyword_only_without_default(self):
# https://github.com/qutebrowser/qutebrowser/issues/1872
def fun(*, target):
"""Blah."""
pass
with pytest.raises(TypeError, match="fun: handler has keyword only "
"argument 'target' without default!"):
fun = cmdutils.register()(fun)
def test_typed_keyword_only_without_default(self):
# https://github.com/qutebrowser/qutebrowser/issues/1872
def fun(*, target: int):
"""Blah."""
pass
with pytest.raises(TypeError, match="fun: handler has keyword only "
"argument 'target' without default!"):
fun = cmdutils.register()(fun)
class TestArgument:
"""Test the @cmdutils.argument decorator."""
def test_invalid_argument(self):
with pytest.raises(ValueError, match="fun has no argument foo!"):
@cmdutils.argument('foo')
def fun(bar):
"""Blah."""
pass
def test_storage(self):
@cmdutils.argument('foo', flag='x')
@cmdutils.argument('bar', flag='y')
def fun(foo, bar):
"""Blah."""
pass
expected = {
'foo': command.ArgInfo(flag='x'),
'bar': command.ArgInfo(flag='y')
}
assert fun.qute_args == expected
def test_wrong_order(self):
"""When @cmdutils.argument is used above (after) @register, fail."""
with pytest.raises(ValueError, match=r"@cmdutils.argument got called "
r"above \(after\) @cmdutils.register for fun!"):
@cmdutils.argument('bar', flag='y')
@cmdutils.register()
def fun(bar):
"""Blah."""
pass
def test_count_and_win_id_same_arg(self):
with pytest.raises(TypeError,
match="Argument marked as both count/win_id!"):
@cmdutils.argument('arg', count=True, win_id=True)
def fun(arg=0):
"""Blah."""
pass
def test_no_docstring(self, caplog):
with caplog.at_level(logging.WARNING):
@cmdutils.register()
def fun():
# no docstring
pass
assert len(caplog.records) == 1
msg = caplog.records[0].message
assert msg.endswith('test_cmdutils.py has no docstring')
def test_no_docstring_with_optimize(self, monkeypatch):
"""With -OO we'd get a warning on start, but no warning afterwards."""
monkeypatch.setattr(sys, 'flags', types.SimpleNamespace(optimize=2))
@cmdutils.register()
def fun():
# no docstring
pass
class TestRun:
@pytest.fixture(autouse=True)
def patch_backend(self, mode_manager, monkeypatch):
monkeypatch.setattr(command.objects, 'backend',
usertypes.Backend.QtWebKit)
@pytest.mark.parametrize('backend, used, ok', [
(usertypes.Backend.QtWebEngine, usertypes.Backend.QtWebEngine, True),
(usertypes.Backend.QtWebEngine, usertypes.Backend.QtWebKit, False),
(usertypes.Backend.QtWebKit, usertypes.Backend.QtWebEngine, False),
(usertypes.Backend.QtWebKit, usertypes.Backend.QtWebKit, True),
(None, usertypes.Backend.QtWebEngine, True),
(None, usertypes.Backend.QtWebKit, True),
])
def test_backend(self, monkeypatch, backend, used, ok):
monkeypatch.setattr(command.objects, 'backend', used)
cmd = _get_cmd(backend=backend)
if ok:
cmd.run(win_id=0)
else:
with pytest.raises(cmdexc.PrerequisitesError,
match=r'.* backend\.'):
cmd.run(win_id=0)
def test_no_args(self):
cmd = _get_cmd()
cmd.run(win_id=0)
def test_instance_unavailable_with_backend(self, monkeypatch):
"""Test what happens when a backend doesn't have an objreg object.
For example, QtWebEngine doesn't have 'hintmanager' registered. We make
sure the backend checking happens before resolving the instance, so we
display an error instead of crashing.
"""
@cmdutils.register(instance='doesnotexist',
backend=usertypes.Backend.QtWebEngine)
def fun(self):
"""Blah."""
pass
monkeypatch.setattr(command.objects, 'backend',
usertypes.Backend.QtWebKit)
cmd = cmdutils.cmd_dict['fun']
with pytest.raises(cmdexc.PrerequisitesError, match=r'.* backend\.'):
cmd.run(win_id=0)
| gpl-3.0 |
mtconley/turntable | test/lib/python2.7/site-packages/numpy/__config__.py | 6 | 1421 | # This file is generated by /private/var/folders/xc/0j24hj7d45dcj9_r_sb1w3jdnh248k/T/pip-build-g429nk/numpy/-c
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
atlas_3_10_blas_info={}
atlas_3_10_blas_threads_info={}
atlas_threads_info={}
blas_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3)], 'extra_compile_args': ['-msse3', '-DAPPLE_ACCELERATE_SGEMV_PATCH', '-I/System/Library/Frameworks/vecLib.framework/Headers']}
atlas_blas_threads_info={}
openblas_info={}
lapack_opt_info={'extra_link_args': ['-Wl,-framework', '-Wl,Accelerate'], 'define_macros': [('NO_ATLAS_INFO', 3)], 'extra_compile_args': ['-msse3', '-DAPPLE_ACCELERATE_SGEMV_PATCH']}
openblas_lapack_info={}
atlas_3_10_threads_info={}
atlas_info={}
atlas_3_10_info={}
lapack_mkl_info={}
blas_mkl_info={}
atlas_blas_info={}
mkl_info={}
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
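# Illustrative example (added for clarity; not part of the generated file).
# get_info() accepts either the short key or the full "_info" key and falls
# back to an empty dict for unknown names.
def _demo_get_info():
    assert get_info('lapack_mkl') == {}   # empty dict recorded above
    return get_info('blas_opt')           # same as get_info('blas_opt_info')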
| mit |
Open-Transport/synthese | packages/ineo_terminus_tl/src/girouette.py | 1 | 6278 | #!/usr/bin/python2
import datetime
import re
from HTMLParser import HTMLParser
import synthese
try:
from lxml import etree
except ImportError:
print("la lib lxml n'est pas disponible")
# Custom subclass of HTMLParser that extracts the lines of text from a HTML document
class HTMLTextExtractor(HTMLParser):
def __init__(self):
from HTMLParser import HTMLParser
HTMLParser.__init__(self)
self.lines = []
self.current_line = ''
self.after_entity = False
def handle_starttag(self, tag, attrs):
# If tag is a <br/>, append current line and start new line
if tag == 'br':
self.lines.append(self.current_line)
self.current_line = ''
self.after_entity = False
def handle_data(self, data):
# Concatenate data to current line
self.current_line += data if len(self.current_line) == 0 else (('' if self.after_entity else ' ') + data)
self.after_entity = False
def handle_entityref(self, data):
# Decode entity and concatenate it to current line
from htmlentitydefs import name2codepoint
character = ' ' if data == 'nbsp' else unichr(name2codepoint[data])
self.current_line = self.current_line + character
self.after_entity = True
def feed(self, data):
from HTMLParser import HTMLParser
data_with_br = data.replace("\n", "<br/>")
HTMLParser.feed(self, data_with_br)
if len(self.current_line) > 0:
self.lines.append(self.current_line)
self.current_line = ''
def get_lines(self):
return self.lines
def wrap_lines(self, max_lines_count, max_line_size):
split_lines = []
merged_lines = []
# Break lines to match max line size
for line in self.lines:
if len(line) == 0:
split_lines.append(line)
else:
start_index = 0
end_index = max_line_size
while start_index < len(line):
split_line = line[start_index:end_index]
split_lines.append(split_line)
start_index += max_line_size
end_index += max_line_size
# If there are too many lines, first remove empty lines
if len(split_lines) > max_lines_count:
split_lines[:] = [line for line in split_lines if len(line.strip()) > 0]
# If there are still too many lines, try to concatenate them up to max_line_size
if len(split_lines) <= max_lines_count:
merged_lines = split_lines
else:
merged_line = ''
for split_line in split_lines:
nb_chars = max_line_size - len(merged_line)
if len(merged_line) > 0:
nb_chars = nb_chars - 1
merged_line = merged_line + ' ' + split_line[0:nb_chars]
else:
merged_line = split_line[0:nb_chars]
if len(merged_line) == max_line_size:
merged_lines.append(merged_line)
merged_line = split_line[nb_chars:]
if len(merged_line) > 0:
merged_lines.append(merged_line)
return merged_lines
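# Illustrative sketch (added for clarity, not part of the original script):
# shows how HTMLTextExtractor could be driven on its own. The sample HTML
# fragment below is an assumption made up purely for demonstration.
def _demo_html_text_extractor():
    parser = HTMLTextExtractor()
    parser.feed(u'Ligne 1<br/>Arret&nbsp;deplace vers le nord')
    raw_lines = parser.get_lines()
    # Reflow into at most 4 lines of at most 25 characters each, matching the
    # constraint of the Girouette 'Text' node built further below.
    wrapped_lines = parser.wrap_lines(4, 25)
    return raw_lines, wrapped_lines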
# Request headers
namespace = "http://www.w3.org/2001/XMLSchema-instance"
locationAttribute = "{%s}noNameSpaceSchemaLocation" % namespace
root = etree.Element("Girouette" + type + "MessageRequest", attrib={locationAttribute: xsd_location} if len(xsd_location) > 0 else {})
childID = etree.SubElement(root, "ID")
childID.text = ID
childRequestTimeStamp = etree.SubElement(root, "RequestTimeStamp")
now = datetime.datetime.now()
childRequestTimeStamp.text = now.strftime("%d/%m/%Y %X")
childRequestorRef = etree.SubElement(root, "RequestorRef")
childRequestorRef.text = "Terminus"
childMessaging = etree.SubElement(root, "Messaging")
# Name
# Ineo SAE requires the name to be defined as ‘XXXX aaaaaaaaaaaaaaaaaaaaaaaaaaa’ where 'XXXX' is a unique message id
childName = etree.SubElement(childMessaging, "Name")
messageID = int(message[0]["message_id"]) % 10000
messageTitle = unicode(message[0]["title"], "utf-8", "ignore")
childName.text = u"{:04d} {:.27s}".format(messageID, messageTitle)
# {Start,Stop}Date
# The start/stop dates sent are the one from the current exploitation day so the dates must be changed
# if the message is sent during period ranging from 00:00 to the end time of the exploitation day
currentDay = now
# Note: the '3' must be kept in sync with the exploitation day = 03:00:00; 27:00:00
if now.hour < 3:
currentDay = now - datetime.timedelta(1)
childStartDate = etree.SubElement(childMessaging, "StartDate")
childStartDate.text = currentDay.strftime("%d/%m/%Y")
childStopDate = etree.SubElement(childMessaging, "StopDate")
childStopDate.text = currentDay.strftime("%d/%m/%Y")
# Code
if int(needs_direction_sign_code) != 0:
childCode = etree.SubElement(childMessaging, "Code")
childCode.text = message[0]["direction_sign_code"]
# Text
# Extract HTML text lines
childText = etree.SubElement(childMessaging, "Text")
htmlParser = HTMLTextExtractor()
unicode_message_text = unicode(message_text, "utf-8", "ignore")
htmlParser.feed(unicode_message_text)
# 'Text' node accepts [1..4] lines * [0..25] characters
lines = htmlParser.wrap_lines(4, 25)
for line in lines:
childLine = etree.SubElement(childText, "Line")
childLine.text = line
# Recipients
childRecipients = etree.SubElement(childMessaging, "Recipients")
recipients = message[0]["recipients"][0]
hasAllNetwork = False
if 'line' in recipients:
# Scan the 'line' recipients to check if the whole transport network is selected
for line in recipients["line"]:
hasAllNetwork = hasAllNetwork or (line["id"] == network_id)
# If it is, use 'AllNetwork' tag
if hasAllNetwork:
childAllNetwork = etree.SubElement(childRecipients, "AllNetwork")
# Else add the Ineo code of each commercial line in the recipients
else:
childLines = etree.SubElement(childRecipients, "Lines")
for line in recipients["line"]:
parameters = { "roid": line["id"] }
linePM = synthese.service("LinesListFunction2", parameters)
lineCodesStr = linePM["line"][0]["creator_id"]
lineCodes = map(lambda x: x.split('|'), lineCodesStr.split(','))
for lineCode in lineCodes:
if lineCode[0] == datasource_id:
childLine = etree.SubElement(childLines, "Line")
childLine.text = lineCode[1]
# Print resulting XML to output stream
print(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding="iso-8859-1"))
| gpl-2.0 |
gavin-feng/odoo | addons/l10n_vn/__init__.py | 425 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module is Copyright (c) 2009-2013 General Solutions (http://gscom.vn) All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
leiferikb/bitpop | build/scripts/slave/swarming/swarming_run_shim.py | 1 | 11828 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Drives tests on Swarming. Both trigger and collect results.
This is the shim that is called through buildbot.
"""
import logging
import optparse
import os
import subprocess
import sys
import threading
import Queue
from common import chromium_utils
from common import find_depot_tools # pylint: disable=W0611
from common import annotator
from slave.swarming import swarming_utils
# From depot tools/
import fix_encoding
def v0_3(
client, swarming_server, isolate_server, priority, dimensions,
task_name, isolated_hash, env, shards):
"""Handles swarm_client/swarming.py starting 7c543276f08.
It was rolled in src on r237619 on 2013-11-27.
"""
cmd = [
sys.executable,
os.path.join(client, 'swarming.py'),
'run',
'--swarming', swarming_server,
'--isolate-server', isolate_server,
'--priority', str(priority),
'--shards', str(shards),
'--task-name', task_name,
'--decorate',
isolated_hash,
]
for name, value in dimensions.iteritems():
if name != 'os':
cmd.extend(('--dimension', name, value))
else:
      # Sadly, older versions of swarming.py need special handling of os.
old_value = [
k for k, v in swarming_utils.OS_MAPPING.iteritems() if v == value
]
assert len(old_value) == 1
cmd.extend(('--os', old_value[0]))
# Enable profiling on the -dev server.
if '-dev' in swarming_server:
cmd.append('--profile')
for name, value in env.iteritems():
cmd.extend(('--env', name, value))
return cmd
def v0_4(
client, swarming_server, isolate_server, priority, dimensions,
task_name, isolated_hash, env, shards):
"""Handles swarm_client/swarming.py starting b39e8cf08c.
It was rolled in src on r246113 on 2014-01-21.
"""
cmd = [
sys.executable,
os.path.join(client, 'swarming.py'),
'run',
'--swarming', swarming_server,
'--isolate-server', isolate_server,
'--priority', str(priority),
'--shards', str(shards),
'--task-name', task_name,
'--decorate',
isolated_hash,
]
for name, value in dimensions.iteritems():
cmd.extend(('--dimension', name, value))
# Enable profiling on the -dev server.
if '-dev' in swarming_server:
cmd.append('--profile')
for name, value in env.iteritems():
cmd.extend(('--env', name, value))
return cmd
def stream_process(cmd):
"""Calls process cmd and yields its output.
This is not the most efficient nor safe way to do it but it is only meant to
be run on linux so it should be fine. Fix if necessary.
"""
p = subprocess.Popen(
cmd, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
while True:
try:
i = p.stdout.readline()
if i:
if sys.platform == 'win32':
# Instead of using universal_newlines=True which would affect
# buffering, just convert the ending CRLF to LF. Otherwise, it
# creates an double interline.
if i.endswith('\r\n'):
i = i[:-2] + '\n'
yield i
continue
except OSError:
pass
if p.poll() is not None:
break
yield p.returncode
finally:
if p.poll() is None:
p.kill()
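# Minimal usage sketch (added for illustration; the python -c command below is
# an assumption, not something the shim actually runs). stream_process()
# yields each output line as a str and finally yields the return code as an
# int; drive_one() forwards both to the collect() loop, which uses the type to
# tell output apart from the completion signal.
def _demo_stream_process():
  for item in stream_process([sys.executable, '-c', 'print("hello")']):
    if isinstance(item, int):
      return item  # The process exit code, yielded last.
    sys.stdout.write(item)  # A line of process output.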
def drive_one(
client, version, swarming_server, isolate_server, priority, dimensions,
task_name, cursor, isolated_hash, env, shards, out):
"""Executes the proper handler based on the code layout and --version support.
"""
def send_back(l):
out.put((cursor, l))
if version < (0, 4):
cmd = v0_3(
client, swarming_server, isolate_server, priority, dimensions,
task_name, isolated_hash, env, shards)
else:
cmd = v0_4(
client, swarming_server, isolate_server, priority, dimensions,
task_name, isolated_hash, env, shards)
try:
for i in stream_process(cmd):
send_back(i)
except Exception as e:
send_back(e)
def drive_many(
client, version, swarming_server, isolate_server, priority, dimensions,
steps, builder, build_number):
logging.info(
'drive_many(%s, %s, %s, %s, %s, %s, %s, %s, %d)',
client, version, swarming_server, isolate_server, priority, dimensions,
steps, builder, build_number)
return _drive_many(
client, version, swarming_server, isolate_server, priority, dimensions,
steps, builder, build_number, Queue.Queue())
def _drive_many(
client, version, swarming_server, isolate_server, priority, dimensions,
steps, builder, build_number, out):
"""Internal version, exposed so it can be hooked in test."""
stream = annotator.AdvancedAnnotationStream(sys.stdout, False)
for step_name in sorted(steps):
    # Seeds the step first before doing the cursors, otherwise it is interleaved
# in the logs of other steps.
stream.seed_step(step_name)
threads = []
# Create the boxes in buildbot in order for consistency.
steps_annotations = {}
for step_name, isolated_hash in sorted(steps.iteritems()):
env = {}
# TODO(maruel): Propagate GTEST_FILTER.
#if gtest_filter not in (None, '', '.', '*'):
# env['GTEST_FILTER'] = gtest_filter
shards = swarming_utils.TESTS_SHARDS.get(step_name, 1)
# This will be the key in steps_annotations.
# TODO(maruel): Work around bug swarming:73 by using unique swarming task
# name. This is done by including the builder name and the build number.
# This is not something we want to keep long term because we lose the
# benefit of skipping running the exact same executable twice for no good
# reason.
task_name = '%s/%s/%s/%s/%d' % (
step_name, dimensions['os'], isolated_hash, builder, build_number)
t = threading.Thread(
target=drive_one,
args=(client, version, swarming_server, isolate_server, priority,
dimensions, task_name, step_name, isolated_hash, env, shards,
out))
t.daemon = True
t.start()
threads.append(t)
# It is important data to surface through buildbot.
steps_annotations[step_name] = annotator.AdvancedAnnotationStep(
sys.stdout, False)
stream.step_cursor(step_name)
steps_annotations[step_name].step_started()
steps_annotations[step_name].step_text(dimensions['os'])
steps_annotations[step_name].step_text(isolated_hash)
sys.stdout.flush()
collect(stream, steps_annotations, step_name, out)
return 0
def collect(stream, steps_annotations, last_cursor, out):
while steps_annotations:
try:
# Polling FTW.
packet = out.get(timeout=1)
except Queue.Empty:
continue
    # Each packet contains the task name and an item to process in the main
# thread.
cursor, item = packet
if last_cursor != cursor:
# Switch annotated buildbot cursor if necessary.
assert steps_annotations.get(cursor), steps_annotations
stream.step_cursor(cursor)
# Works around a problem on Windows where the cursor would not be properly
# updated.
sys.stdout.write('\n')
sys.stdout.flush()
last_cursor = cursor
if isinstance(item, (int, Exception)):
# Signals it's completed.
if item:
steps_annotations[cursor].step_failure()
sys.stdout.flush()
if isinstance(item, Exception):
print >> sys.stderr, item
steps_annotations[cursor].step_closed()
# Works around a problem on Windows where the step would not be detected
      # as closed until the next output. This breaks the step's duration: the
      # step is listed as taking much more time than it actually took.
sys.stdout.write('\n')
# TODO(maruel): Even with this, there is still buffering happening
      # outside of the control of this script. This is mostly apparent on
# Windows.
sys.stdout.flush()
del steps_annotations[cursor]
else:
assert isinstance(item, str), item
sys.stdout.write(item)
out.task_done()
def determine_steps_to_run(isolated_hashes, testfilter):
"""Returns a dict of test:hash for the test that should be run through
Swarming.
This is done by looking at the build properties to figure out what should be
run.
"""
logging.info(
'determine_steps_to_run(%s, %s)', isolated_hashes, testfilter)
# TODO(maruel): Support gtest filter.
# If testfilter == [], make it behave the same as if defaulttests was
# specified.
testfilter = testfilter or ['defaulttests']
def should_run(name):
if 'defaulttests' in testfilter:
return True
return any(t.startswith(name + '_swarm') for t in testfilter)
return dict(
(name, isolated_hash)
for name, isolated_hash in isolated_hashes.iteritems()
if should_run(name))
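# Illustrative example (added for clarity; the test names and hashes are made
# up, not real build properties). A testfilter of ['base_unittests_swarm']
# keeps only the matching isolated hash, while ['defaulttests'] keeps them all.
def _demo_determine_steps_to_run():
  hashes = {'base_unittests': 'deadbeef', 'browser_tests': 'cafebabe'}
  assert determine_steps_to_run(hashes, ['base_unittests_swarm']) == {
      'base_unittests': 'deadbeef',
  }
  assert determine_steps_to_run(hashes, ['defaulttests']) == hashes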
def process_build_properties(options):
"""Converts build properties and factory properties into expected flags."""
# target_os is not defined when using a normal builder, contrary to a
# xx_swarm_triggered buildbot<->swarming builder, and it's not needed since
# the OS match, it's defined in builder/tester configurations.
slave_os = options.build_properties.get('target_os', sys.platform)
priority = swarming_utils.build_to_priority(options.build_properties)
steps = determine_steps_to_run(
options.build_properties.get('swarm_hashes', {}),
options.build_properties.get('testfilter', ['defaulttests']))
builder = options.build_properties.get('buildername', 'unknown')
build_number = options.build_properties.get('buildnumber', 0)
return slave_os, priority, steps, builder, build_number
def main(args):
"""Note: this is solely to run the current master's code and can totally
differ from the underlying script flags.
To update these flags:
- Update the following code to support both the previous flag and the new
flag.
- Change scripts/master/factory/swarm_commands.py to pass the new flag.
- Restart all the masters using swarming.
- Remove the old flag from this code.
"""
client = swarming_utils.find_client(os.getcwd())
if not client:
print >> sys.stderr, 'Failed to find swarm(ing)_client'
return 1
version = swarming_utils.get_version(client)
if version < (0, 3):
print >> sys.stderr, (
'%s is version %s which is too old. Please run the test locally' %
(client, '.'.join(version)))
return 1
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('--verbose', action='store_true')
parser.add_option('--swarming')
parser.add_option('--isolate-server')
chromium_utils.AddPropertiesOptions(parser)
options, args = parser.parse_args(args)
if args:
parser.error('Unsupported args: %s' % args)
if not options.swarming or not options.isolate_server:
parser.error('Require both --swarming and --isolate-server')
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
# Loads the other flags implicitly.
slave_os, priority, steps, builder, build_number = process_build_properties(
options)
logging.info('To run: %s, %s, %s', slave_os, priority, steps)
if not steps:
print('Nothing to trigger')
annotator.AdvancedAnnotationStep(sys.stdout, False).step_warnings()
return 0
print('Selected tests:')
print('\n'.join(' %s' % s for s in sorted(steps)))
selected_os = swarming_utils.OS_MAPPING[slave_os]
print('Selected OS: %s' % selected_os)
return drive_many(
client,
version,
options.swarming,
options.isolate_server,
priority,
{'os': selected_os},
steps,
builder,
build_number)
if __name__ == '__main__':
fix_encoding.fix_encoding()
sys.exit(main(sys.argv[1:]))
| gpl-3.0 |
weka-io/boto | boto/opsworks/layer1.py | 132 | 129869 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.opsworks import exceptions
class OpsWorksConnection(AWSQueryConnection):
"""
AWS OpsWorks
Welcome to the AWS OpsWorks API Reference . This guide provides
descriptions, syntax, and usage examples about AWS OpsWorks
actions and data types, including common parameters and error
codes.
AWS OpsWorks is an application management service that provides an
integrated experience for overseeing the complete application
lifecycle. For information about this product, go to the `AWS
OpsWorks`_ details page.
**SDKs and CLI**
The most common way to use the AWS OpsWorks API is by using the
AWS Command Line Interface (CLI) or by using one of the AWS SDKs
to implement applications in your preferred language. For more
information, see:
+ `AWS CLI`_
+ `AWS SDK for Java`_
+ `AWS SDK for .NET`_
+ `AWS SDK for PHP 2`_
+ `AWS SDK for Ruby`_
+ `AWS SDK for Node.js`_
+ `AWS SDK for Python(Boto)`_
**Endpoints**
AWS OpsWorks supports only one endpoint, opsworks.us-
east-1.amazonaws.com (HTTPS), so you must connect to that
endpoint. You can then use the API to direct AWS OpsWorks to
create stacks in any AWS Region.
**Chef Versions**
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
version, 0.9, 11.4, or 11.10. The default value is currently
11.10. For more information, see `Chef Versions`_.
You can still specify Chef 0.9 for your stack, but new features
are not available for Chef 0.9 stacks, and support is scheduled to
end on July 24, 2014. We do not recommend using Chef 0.9 for new
stacks, and we recommend migrating your existing Chef 0.9 stacks
to Chef 11.10 as soon as possible.
"""
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com"
ServiceName = "OpsWorks"
TargetPrefix = "OpsWorks_20130218"
ResponseError = JSONResponseError
_faults = {
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(OpsWorksConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
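    def _usage_sketch(self):
        """Illustrative sketch only; not part of the OpsWorks API surface.

        The ARNs, names and instance type below are placeholder assumptions.
        It shows a typical call sequence: create a stack, add a custom layer,
        then start an instance in that layer. Each layer1 call returns the
        decoded JSON response from the service.
        """
        stack = self.create_stack(
            name='MyStack', region='us-east-1',
            service_role_arn='arn:aws:iam::111122223333:role/opsworks-service',
            default_instance_profile_arn=(
                'arn:aws:iam::111122223333:instance-profile/opsworks-ec2'))
        layer = self.create_layer(
            stack_id=stack['StackId'], type='custom', name='My Layer',
            shortname='mylayer')
        return self.create_instance(
            stack_id=stack['StackId'], layer_ids=[layer['LayerId']],
            instance_type='m3.medium')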
def assign_instance(self, instance_id, layer_ids):
"""
Assign a registered instance to a custom layer. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The layer ID, which must correspond to a custom
layer. You cannot assign a registered instance to a built-in layer.
"""
params = {
'InstanceId': instance_id,
'LayerIds': layer_ids,
}
return self.make_request(action='AssignInstance',
body=json.dumps(params))
def assign_volume(self, volume_id, instance_id=None):
"""
Assigns one of the stack's registered Amazon EBS volumes to a
specified instance. The volume must first be registered with
the stack by calling RegisterVolume. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'VolumeId': volume_id, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssignVolume',
body=json.dumps(params))
def associate_elastic_ip(self, elastic_ip, instance_id=None):
"""
Associates one of the stack's registered Elastic IP addresses
with a specified instance. The address must first be
registered with the stack by calling RegisterElasticIp. For
more information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'ElasticIp': elastic_ip, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssociateElasticIp',
body=json.dumps(params))
def attach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Attaches an Elastic Load Balancing load balancer to a
specified layer. For more information, see `Elastic Load
Balancing`_.
You must create the Elastic Load Balancing instance
separately, by using the Elastic Load Balancing console, API,
or CLI. For more information, see ` Elastic Load Balancing
Developer Guide`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is to be attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='AttachElasticLoadBalancer',
body=json.dumps(params))
def clone_stack(self, source_stack_id, service_role_arn, name=None,
region=None, vpc_id=None, attributes=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
clone_permissions=None, clone_app_ids=None,
default_root_device_type=None):
"""
Creates a clone of a specified stack. For more information,
see `Clone a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type source_stack_id: string
:param source_stack_id: The source stack ID.
:type name: string
:param name: The cloned stack name.
:type region: string
:param region: The cloned stack AWS region, such as "us-east-1". For
more information about AWS regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the cloned stack is to be
launched into. It must be in the specified region. All instances
are launched into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: A list of stack attributes and values as key/value
pairs to be added to the cloned stack.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. If you create a stack by using the AWS OpsWorks
console, it creates the role for you. You can obtain an existing
stack's IAM ARN programmatically by calling DescribePermissions.
For more information about IAM ARNs, see `Using Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the source
stack's service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stacks's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
        :param hostname_theme: The stack's host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The cloned stack's default
Availability Zone, which must be in the specified region. For more
information, see `Regions and Endpoints`_. If you also specify a
value for `DefaultSubnetId`, the subnet must be in the same zone.
For more information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether to use custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type clone_permissions: boolean
:param clone_permissions: Whether to clone the source stack's
permissions.
:type clone_app_ids: list
:param clone_app_ids: A list of source stack app IDs to be included in
the cloned stack.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
"""
params = {
'SourceStackId': source_stack_id,
'ServiceRoleArn': service_role_arn,
}
if name is not None:
params['Name'] = name
if region is not None:
params['Region'] = region
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if clone_permissions is not None:
params['ClonePermissions'] = clone_permissions
if clone_app_ids is not None:
params['CloneAppIds'] = clone_app_ids
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CloneStack',
body=json.dumps(params))
def create_app(self, stack_id, name, type, shortname=None,
description=None, data_sources=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Creates an app for a specified stack. For more information,
see `Creating Apps`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type shortname: string
:param shortname: The app's short name.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data source.
:type type: string
:param type: The app type. Each supported type is associated with a
particular layer. For example, PHP applications are associated with
a PHP layer. AWS OpsWorks deploys an application to those instances
that are members of the corresponding layer.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether to enable SSL for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instance.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
"""
params = {'StackId': stack_id, 'Name': name, 'Type': type, }
if shortname is not None:
params['Shortname'] = shortname
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='CreateApp',
body=json.dumps(params))
def create_deployment(self, stack_id, command, app_id=None,
instance_ids=None, comment=None, custom_json=None):
"""
Runs deployment or stack commands. For more information, see
`Deploying Apps`_ and `Run Stack Commands`_.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type app_id: string
:param app_id: The app ID. This parameter is required for app
deployments, but not for other deployment commands.
:type instance_ids: list
:param instance_ids: The instance IDs for the deployment targets.
:type command: dict
:param command: A `DeploymentCommand` object that specifies the
deployment command and any associated arguments.
:type comment: string
:param comment: A user-defined comment.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
"""
params = {'StackId': stack_id, 'Command': command, }
if app_id is not None:
params['AppId'] = app_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
if comment is not None:
params['Comment'] = comment
if custom_json is not None:
params['CustomJson'] = custom_json
return self.make_request(action='CreateDeployment',
body=json.dumps(params))
def create_instance(self, stack_id, layer_ids, instance_type,
auto_scaling_type=None, hostname=None, os=None,
ami_id=None, ssh_key_name=None,
availability_zone=None, virtualization_type=None,
subnet_id=None, architecture=None,
root_device_type=None, install_updates_on_boot=None,
ebs_optimized=None):
"""
Creates an instance in a specified stack. For more
information, see `Adding an Instance to a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array that contains the instance layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version. If you set this
parameter to `Custom`, you must use the CreateInstance action's
AmiId parameter to specify the custom AMI that you want to use. For
more information on the standard operating systems, see `Operating
            Systems`_. For more information on how to use custom AMIs with
OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_.
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type availability_zone: string
:param availability_zone: The instance Availability Zone. For more
information, see `Regions and Endpoints`_.
:type virtualization_type: string
:param virtualization_type: The instance's virtualization type,
`paravirtual` or `hvm`.
:type subnet_id: string
:param subnet_id: The ID of the instance's subnet. If the stack is
running in a VPC, you can use this parameter to override the
stack's default subnet ID value and direct AWS OpsWorks to launch
the instance in a different subnet.
:type architecture: string
:param architecture: The instance architecture. The default option is
`x86_64`. Instance types do not necessarily support both
architectures. For a list of the architectures that are supported
by the different instance types, see `Instance Families and
Types`_.
:type root_device_type: string
:param root_device_type: The instance root device type. For more
information, see `Storage for the Root Device`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True` to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether to create an Amazon EBS-optimized
instance.
"""
params = {
'StackId': stack_id,
'LayerIds': layer_ids,
'InstanceType': instance_type,
}
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if virtualization_type is not None:
params['VirtualizationType'] = virtualization_type
if subnet_id is not None:
params['SubnetId'] = subnet_id
if architecture is not None:
params['Architecture'] = architecture
if root_device_type is not None:
params['RootDeviceType'] = root_device_type
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='CreateInstance',
body=json.dumps(params))
def create_layer(self, stack_id, type, name, shortname, attributes=None,
custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Creates a layer. For more information, see `How to Create a
Layer`_.
You should use **CreateLayer** for noncustom layer types such
as PHP App Server only if the stack does not have an existing
layer of that type. A stack can have at most one instance of
each noncustom layer; if you attempt to create a second
instance, **CreateLayer** fails. A stack can have an arbitrary
number of custom layers, so you can call **CreateLayer** as
many times as you like for that layer type.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The layer stack ID.
:type type: string
:param type: The layer type. A stack cannot have more than one built-in
layer of the same type. It can have any number of custom layers.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorks and by Chef recipes. The short name is also used as the
name for the directory where your app files are installed. It can
have a maximum of 200 characters, which are limited to the
alphanumeric characters, '-', '_', and '.'.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile that to
be used for the layer's EC2 instances. For more information about
IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer custom
security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the layer
packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
        :param enable_auto_healing: Whether to enable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
:type lifecycle_event_configuration: dict
:param lifecycle_event_configuration: A LifeCycleEventConfiguration
object that you can use to configure the Shutdown event to specify
an execution timeout and enable or disable Elastic Load Balancer
connection draining.
"""
params = {
'StackId': stack_id,
'Type': type,
'Name': name,
'Shortname': shortname,
}
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='CreateLayer',
body=json.dumps(params))
def create_stack(self, name, region, service_role_arn,
default_instance_profile_arn, vpc_id=None,
attributes=None, default_os=None, hostname_theme=None,
default_availability_zone=None, default_subnet_id=None,
custom_json=None, configuration_manager=None,
chef_configuration=None, use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
default_root_device_type=None):
"""
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
:type region: string
:param region: The stack AWS region, such as "us-east-1". For more
information about Amazon regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
It must be in the specified region. All instances are launched into
this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn: The stack AWS Identity and Access Management
(IAM) role, which allows AWS OpsWorks to work with AWS resources on
your behalf. You must set this parameter to the Amazon Resource
Name (ARN) for an existing IAM role. For more information about IAM
ARNs, see `Using Identifiers`_.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
        :param hostname_theme: The stack's host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. The default option is
`instance-store`. For more information, see `Storage for the Root
Device`_.
"""
params = {
'Name': name,
'Region': region,
'ServiceRoleArn': service_role_arn,
'DefaultInstanceProfileArn': default_instance_profile_arn,
}
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CreateStack',
body=json.dumps(params))
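# --- Usage sketch (illustrative, not part of the original API surface) ---
# Assumes this class is exposed as boto.opsworks.layer1.OpsWorksConnection and
# that the ARNs below are placeholders for real IAM resources in your account.
#
#     from boto.opsworks.layer1 import OpsWorksConnection
#
#     conn = OpsWorksConnection()  # credentials resolved by the usual boto chain
#     result = conn.create_stack(
#         name='MyStack',
#         region='us-east-1',
#         service_role_arn='arn:aws:iam::111122223333:role/aws-opsworks-service-role',
#         default_instance_profile_arn=(
#             'arn:aws:iam::111122223333:instance-profile/aws-opsworks-ec2-role'),
#         default_os='Amazon Linux 2014.09',
#         use_opsworks_security_groups=True)
#     # The parsed response is expected to carry the new stack's 'StackId'.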
def create_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's public SSH key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Setting an IAM User's Public SSH Key`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='CreateUserProfile',
body=json.dumps(params))
def delete_app(self, app_id):
"""
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
"""
params = {'AppId': app_id, }
return self.make_request(action='DeleteApp',
body=json.dumps(params))
def delete_instance(self, instance_id, delete_elastic_ip=None,
delete_volumes=None):
"""
Deletes a specified instance, which terminates the associated
Amazon EC2 instance. You must stop an instance before you can
delete it.
For more information, see `Deleting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type delete_elastic_ip: boolean
:param delete_elastic_ip: Whether to delete the instance Elastic IP
address.
:type delete_volumes: boolean
:param delete_volumes: Whether to delete the instance's Amazon EBS
volumes.
"""
params = {'InstanceId': instance_id, }
if delete_elastic_ip is not None:
params['DeleteElasticIp'] = delete_elastic_ip
if delete_volumes is not None:
params['DeleteVolumes'] = delete_volumes
return self.make_request(action='DeleteInstance',
body=json.dumps(params))
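# Usage sketch (illustrative; reuses the `conn` object from the create_stack
# sketch above, and 'INSTANCE_ID' is a placeholder). Instances must be stopped
# before they can be deleted, per the docstring above.
#
#     conn.stop_instance('INSTANCE_ID')
#     # ... wait for the instance to reach the 'stopped' state ...
#     conn.delete_instance('INSTANCE_ID',
#                          delete_elastic_ip=True,
#                          delete_volumes=True)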
def delete_layer(self, layer_id):
"""
Deletes a specified layer. You must first stop and then delete
all associated instances or unassign registered instances. For
more information, see `How to Delete a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='DeleteLayer',
body=json.dumps(params))
def delete_stack(self, stack_id):
"""
Deletes a specified stack. You must first delete all
instances, layers, and apps or deregister registered
instances. For more information, see `Shut Down a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DeleteStack',
body=json.dumps(params))
def delete_user_profile(self, iam_user_arn):
"""
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
"""
params = {'IamUserArn': iam_user_arn, }
return self.make_request(action='DeleteUserProfile',
body=json.dumps(params))
def deregister_elastic_ip(self, elastic_ip):
"""
Deregisters a specified Elastic IP address. The address can
then be registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DeregisterElasticIp',
body=json.dumps(params))
def deregister_instance(self, instance_id):
"""
Deregisters a registered Amazon EC2 or on-premises instance.
This action removes the instance from the stack and returns it
to your control. This action cannot be used with instances
that were created with AWS OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='DeregisterInstance',
body=json.dumps(params))
def deregister_rds_db_instance(self, rds_db_instance_arn):
"""
Deregisters an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
return self.make_request(action='DeregisterRdsDbInstance',
body=json.dumps(params))
def deregister_volume(self, volume_id):
"""
Deregisters an Amazon EBS volume. The volume can then be
registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='DeregisterVolume',
body=json.dumps(params))
def describe_apps(self, stack_id=None, app_ids=None):
"""
Requests a description of a specified set of apps.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The app stack ID. If you use this parameter,
`DescribeApps` returns a description of the apps in the specified
stack.
:type app_ids: list
:param app_ids: An array of app IDs for the apps to be described. If
you use this parameter, `DescribeApps` returns a description of the
specified apps. Otherwise, it returns a description of every app.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_ids is not None:
params['AppIds'] = app_ids
return self.make_request(action='DescribeApps',
body=json.dumps(params))
def describe_commands(self, deployment_id=None, instance_id=None,
command_ids=None):
"""
Describes the results of specified commands.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type deployment_id: string
:param deployment_id: The deployment ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified deployment.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified instance.
:type command_ids: list
:param command_ids: An array of command IDs. If you include this
parameter, `DescribeCommands` returns a description of the
specified commands. Otherwise, it returns a description of every
command.
"""
params = {}
if deployment_id is not None:
params['DeploymentId'] = deployment_id
if instance_id is not None:
params['InstanceId'] = instance_id
if command_ids is not None:
params['CommandIds'] = command_ids
return self.make_request(action='DescribeCommands',
body=json.dumps(params))
def describe_deployments(self, stack_id=None, app_id=None,
deployment_ids=None):
"""
Requests a description of a specified set of deployments.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you include this parameter,
`DescribeDeployments` returns a description of the deployments
associated with the specified stack.
:type app_id: string
:param app_id: The app ID. If you include this parameter,
`DescribeDeployments` returns a description of the deployments
associated with the specified app.
:type deployment_ids: list
:param deployment_ids: An array of deployment IDs to be described. If
you include this parameter, `DescribeDeployments` returns a
description of the specified deployments. Otherwise, it returns a
description of every deployment.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_id is not None:
params['AppId'] = app_id
if deployment_ids is not None:
params['DeploymentIds'] = deployment_ids
return self.make_request(action='DescribeDeployments',
body=json.dumps(params))
def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None):
"""
Describes `Elastic IP addresses`_.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses associated with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses that are registered with the specified stack.
:type ips: list
:param ips: An array of Elastic IP addresses to be described. If you
include this parameter, `DescribeElasticIps` returns a description
of the specified Elastic IP addresses. Otherwise, it returns a
description of every Elastic IP address.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if ips is not None:
params['Ips'] = ips
return self.make_request(action='DescribeElasticIps',
body=json.dumps(params))
def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None):
"""
Describes a stack's Elastic Load Balancing instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's Elastic
Load Balancing instances.
:type layer_ids: list
:param layer_ids: A list of layer IDs. The action describes the Elastic
Load Balancing instances for the specified layers.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeElasticLoadBalancers',
body=json.dumps(params))
def describe_instances(self, stack_id=None, layer_id=None,
instance_ids=None):
"""
Requests a description of a set of instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified stack.
:type layer_id: string
:param layer_id: A layer ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified layer.
:type instance_ids: list
:param instance_ids: An array of instance IDs to be described. If you
use this parameter, `DescribeInstances` returns a description of
the specified instances. Otherwise, it returns a description of
every instance.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_id is not None:
params['LayerId'] = layer_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
return self.make_request(action='DescribeInstances',
body=json.dumps(params))
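# Usage sketch (illustrative): the three parameters are alternative ways to
# scope the query. The 'Instances' key in the parsed response is assumed from
# the OpsWorks API documentation, not defined in this module.
#
#     by_stack = conn.describe_instances(stack_id='STACK_ID')
#     by_layer = conn.describe_instances(layer_id='LAYER_ID')
#     for inst in by_stack.get('Instances', []):
#         print(inst['InstanceId'], inst.get('Status'))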
def describe_layers(self, stack_id=None, layer_ids=None):
"""
Requests a description of one or more layers in a specified
stack.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array of layer IDs that specify the layers to be
described. If you omit this parameter, `DescribeLayers` returns a
description of every layer in the specified stack.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeLayers',
body=json.dumps(params))
def describe_load_based_auto_scaling(self, layer_ids):
"""
Describes load-based auto scaling configurations for specified
layers.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type layer_ids: list
:param layer_ids: An array of layer IDs.
"""
params = {'LayerIds': layer_ids, }
return self.make_request(action='DescribeLoadBasedAutoScaling',
body=json.dumps(params))
def describe_my_user_profile(self):
"""
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
"""
params = {}
return self.make_request(action='DescribeMyUserProfile',
body=json.dumps(params))
def describe_permissions(self, iam_user_arn=None, stack_id=None):
"""
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN. For more information about IAM
ARNs, see `Using Identifiers`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {}
if iam_user_arn is not None:
params['IamUserArn'] = iam_user_arn
if stack_id is not None:
params['StackId'] = stack_id
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
def describe_raid_arrays(self, instance_id=None, stack_id=None,
raid_array_ids=None):
"""
Describes an instance's RAID arrays.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeRaidArrays` returns descriptions of the RAID arrays
associated with the specified instance.
:type stack_id: string
:param stack_id: The stack ID.
:type raid_array_ids: list
:param raid_array_ids: An array of RAID array IDs. If you use this
parameter, `DescribeRaidArrays` returns descriptions of the
specified arrays. Otherwise, it returns a description of every
array.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_ids is not None:
params['RaidArrayIds'] = raid_array_ids
return self.make_request(action='DescribeRaidArrays',
body=json.dumps(params))
def describe_rds_db_instances(self, stack_id, rds_db_instance_arns=None):
"""
Describes Amazon RDS instances.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID that the instances are registered with.
The operation returns descriptions of all registered Amazon RDS
instances.
:type rds_db_instance_arns: list
:param rds_db_instance_arns: An array containing the ARNs of the
instances to be described.
"""
params = {'StackId': stack_id, }
if rds_db_instance_arns is not None:
params['RdsDbInstanceArns'] = rds_db_instance_arns
return self.make_request(action='DescribeRdsDbInstances',
body=json.dumps(params))
def describe_service_errors(self, stack_id=None, instance_id=None,
service_error_ids=None):
"""
Describes AWS OpsWorks service errors.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified stack.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified instance.
:type service_error_ids: list
:param service_error_ids: An array of service error IDs. If you use
this parameter, `DescribeServiceErrors` returns descriptions of the
specified errors. Otherwise, it returns a description of every
error.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if instance_id is not None:
params['InstanceId'] = instance_id
if service_error_ids is not None:
params['ServiceErrorIds'] = service_error_ids
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
def describe_stack_provisioning_parameters(self, stack_id):
"""
Requests a description of a stack's provisioning parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the stack
or an attached policy that explicitly grants permissions. For
more information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackProvisioningParameters',
body=json.dumps(params))
def describe_stack_summary(self, stack_id):
"""
Describes the number of layers and apps in a specified stack,
and the number of instances in each state, such as
`running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackSummary',
body=json.dumps(params))
def describe_stacks(self, stack_ids=None):
"""
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_ids: list
:param stack_ids: An array of stack IDs that specify the stacks to be
described. If you omit this parameter, `DescribeStacks` returns a
description of every stack.
"""
params = {}
if stack_ids is not None:
params['StackIds'] = stack_ids
return self.make_request(action='DescribeStacks',
body=json.dumps(params))
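# Usage sketch (illustrative): omitting stack_ids lists every stack visible to
# the caller. The 'Stacks' and 'StackSummary' response keys are assumed from
# the OpsWorks API documentation.
#
#     for stack in conn.describe_stacks().get('Stacks', []):
#         summary = conn.describe_stack_summary(stack['StackId'])
#         print(stack['Name'], summary.get('StackSummary', {}))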
def describe_time_based_auto_scaling(self, instance_ids):
"""
Describes time-based auto scaling configurations for specified
instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_ids: list
:param instance_ids: An array of instance IDs.
"""
params = {'InstanceIds': instance_ids, }
return self.make_request(action='DescribeTimeBasedAutoScaling',
body=json.dumps(params))
def describe_user_profiles(self, iam_user_arns=None):
"""
Describes the specified users.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arns: list
:param iam_user_arns: An array of IAM user ARNs that identify the users
to be described.
"""
params = {}
if iam_user_arns is not None:
params['IamUserArns'] = iam_user_arns
return self.make_request(action='DescribeUserProfiles',
body=json.dumps(params))
def describe_volumes(self, instance_id=None, stack_id=None,
raid_array_id=None, volume_ids=None):
"""
Describes an instance's Amazon EBS volumes.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's
registered Amazon EBS volumes.
:type raid_array_id: string
:param raid_array_id: The RAID array ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified RAID array.
:type volume_ids: list
:param volume_ids: An array of volume IDs. If you use this parameter,
`DescribeVolumes` returns descriptions of the specified volumes.
Otherwise, it returns a description of every volume.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_id is not None:
params['RaidArrayId'] = raid_array_id
if volume_ids is not None:
params['VolumeIds'] = volume_ids
return self.make_request(action='DescribeVolumes',
body=json.dumps(params))
def detach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Detaches a specified Elastic Load Balancing instance from its
layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='DetachElasticLoadBalancer',
body=json.dumps(params))
def disassociate_elastic_ip(self, elastic_ip):
"""
Disassociates an Elastic IP address from its instance. The
address remains registered with the stack. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DisassociateElasticIp',
body=json.dumps(params))
def get_hostname_suggestion(self, layer_id):
"""
Gets a generated host name for the specified layer, based on
the current host name theme.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='GetHostnameSuggestion',
body=json.dumps(params))
def reboot_instance(self, instance_id):
"""
Reboots a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='RebootInstance',
body=json.dumps(params))
def register_elastic_ip(self, elastic_ip, stack_id):
"""
Registers an Elastic IP address with a specified stack. An
address can be registered with only one stack at a time. If
the address is already registered, you must first deregister
it by calling DeregisterElasticIp. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'ElasticIp': elastic_ip, 'StackId': stack_id, }
return self.make_request(action='RegisterElasticIp',
body=json.dumps(params))
def register_instance(self, stack_id, hostname=None, public_ip=None,
private_ip=None, rsa_public_key=None,
rsa_public_key_fingerprint=None,
instance_identity=None):
"""
Registers instances with a specified stack that were created
outside of AWS OpsWorks.
We do not recommend using this action to register instances.
The complete registration operation has two primary steps,
installing the AWS OpsWorks agent on the instance and
registering the instance with the stack. `RegisterInstance`
handles only the second step. You should instead use the AWS
CLI `register` command, which performs the entire registration
operation.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The ID of the stack that the instance is to be
registered with.
:type hostname: string
:param hostname: The instance's hostname.
:type public_ip: string
:param public_ip: The instance's public IP address.
:type private_ip: string
:param private_ip: The instance's private IP address.
:type rsa_public_key: string
:param rsa_public_key: The instance's public RSA key. This key is used
to encrypt communication between the instance and the service.
:type rsa_public_key_fingerprint: string
:param rsa_public_key_fingerprint: The instance's public RSA key
fingerprint.
:type instance_identity: dict
:param instance_identity: An InstanceIdentity object that contains the
instance's identity.
"""
params = {'StackId': stack_id, }
if hostname is not None:
params['Hostname'] = hostname
if public_ip is not None:
params['PublicIp'] = public_ip
if private_ip is not None:
params['PrivateIp'] = private_ip
if rsa_public_key is not None:
params['RsaPublicKey'] = rsa_public_key
if rsa_public_key_fingerprint is not None:
params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint
if instance_identity is not None:
params['InstanceIdentity'] = instance_identity
return self.make_request(action='RegisterInstance',
body=json.dumps(params))
def register_rds_db_instance(self, stack_id, rds_db_instance_arn,
db_user, db_password):
"""
Registers an Amazon RDS instance with a stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The database's master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {
'StackId': stack_id,
'RdsDbInstanceArn': rds_db_instance_arn,
'DbUser': db_user,
'DbPassword': db_password,
}
return self.make_request(action='RegisterRdsDbInstance',
body=json.dumps(params))
def register_volume(self, stack_id, ec_2_volume_id=None):
"""
Registers an Amazon EBS volume with a specified stack. A
volume can be registered with only one stack at a time. If the
volume is already registered, you must first deregister it by
calling DeregisterVolume. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type ec_2_volume_id: string
:param ec_2_volume_id: The Amazon EBS volume ID.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
if ec_2_volume_id is not None:
params['Ec2VolumeId'] = ec_2_volume_id
return self.make_request(action='RegisterVolume',
body=json.dumps(params))
def set_load_based_auto_scaling(self, layer_id, enable=None,
up_scaling=None, down_scaling=None):
"""
Specifies the load-based auto scaling configuration for a
specified layer. For more information, see `Managing Load with
Time-based and Load-based Instances`_.
To use load-based auto scaling, you must create a set of load-
based auto scaling instances. Load-based auto scaling operates
only on the instances from that set, so you must ensure that
you have created enough instances to handle the maximum
anticipated load.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type enable: boolean
:param enable: Enables load-based auto scaling for the layer.
:type up_scaling: dict
:param up_scaling: An `AutoScalingThresholds` object with the upscaling
threshold configuration. If the load exceeds these thresholds for a
specified amount of time, AWS OpsWorks starts a specified number of
instances.
:type down_scaling: dict
:param down_scaling: An `AutoScalingThresholds` object with the
downscaling threshold configuration. If the load falls below these
thresholds for a specified amount of time, AWS OpsWorks stops a
specified number of instances.
"""
params = {'LayerId': layer_id, }
if enable is not None:
params['Enable'] = enable
if up_scaling is not None:
params['UpScaling'] = up_scaling
if down_scaling is not None:
params['DownScaling'] = down_scaling
return self.make_request(action='SetLoadBasedAutoScaling',
body=json.dumps(params))
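# Usage sketch (illustrative): the threshold field names (InstanceCount,
# ThresholdsWaitTime, CpuThreshold, ...) follow the OpsWorks
# AutoScalingThresholds data type and are an assumption here, not something
# defined in this module.
#
#     conn.set_load_based_auto_scaling(
#         layer_id='LAYER_ID',
#         enable=True,
#         up_scaling={'InstanceCount': 2, 'ThresholdsWaitTime': 5,
#                     'CpuThreshold': 80.0},
#         down_scaling={'InstanceCount': 2, 'ThresholdsWaitTime': 10,
#                       'CpuThreshold': 30.0})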
def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
allow_sudo=None, level=None):
"""
Specifies a user's permissions. For more information, see
`Security and Permissions`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type allow_ssh: boolean
:param allow_ssh: The user is allowed to use SSH to communicate with
the instance.
:type allow_sudo: boolean
:param allow_sudo: The user is allowed to use **sudo** to elevate
privileges.
:type level: string
:param level: The user's permission level, which must be set to one of
the following strings. You cannot set your own permissions level.
+ `deny`
+ `show`
+ `deploy`
+ `manage`
+ `iam_only`
For more information on the permissions associated with these levels,
see `Managing User Permissions`_
"""
params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
if allow_ssh is not None:
params['AllowSsh'] = allow_ssh
if allow_sudo is not None:
params['AllowSudo'] = allow_sudo
if level is not None:
params['Level'] = level
return self.make_request(action='SetPermission',
body=json.dumps(params))
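# Usage sketch (illustrative; the ARN and stack ID are placeholders): grants a
# user SSH and sudo access plus the 'manage' permission level on one stack.
#
#     conn.set_permission(
#         stack_id='STACK_ID',
#         iam_user_arn='arn:aws:iam::111122223333:user/alice',
#         allow_ssh=True,
#         allow_sudo=True,
#         level='manage')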
def set_time_based_auto_scaling(self, instance_id,
auto_scaling_schedule=None):
"""
Specifies the time-based auto scaling configuration for a
specified instance. For more information, see `Managing Load
with Time-based and Load-based Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type auto_scaling_schedule: dict
:param auto_scaling_schedule: An `AutoScalingSchedule` with the
instance schedule.
"""
params = {'InstanceId': instance_id, }
if auto_scaling_schedule is not None:
params['AutoScalingSchedule'] = auto_scaling_schedule
return self.make_request(action='SetTimeBasedAutoScaling',
body=json.dumps(params))
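# Usage sketch (illustrative): an AutoScalingSchedule maps lowercase day names
# to {hour: 'on'} maps, per the OpsWorks API documentation (an assumption
# here). This keeps the instance online 09:00-17:00 on Mondays.
#
#     schedule = {'monday': {'9': 'on', '10': 'on', '11': 'on', '12': 'on',
#                            '13': 'on', '14': 'on', '15': 'on', '16': 'on'}}
#     conn.set_time_based_auto_scaling('INSTANCE_ID',
#                                      auto_scaling_schedule=schedule)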
def start_instance(self, instance_id):
"""
Starts a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StartInstance',
body=json.dumps(params))
def start_stack(self, stack_id):
"""
Starts a stack's instances.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StartStack',
body=json.dumps(params))
def stop_instance(self, instance_id):
"""
Stops a specified instance. When you stop a standard instance,
the data disappears and must be reinstalled when you restart
the instance. You can stop an Amazon EBS-backed instance
without losing data. For more information, see `Starting,
Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StopInstance',
body=json.dumps(params))
def stop_stack(self, stack_id):
"""
Stops a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StopStack',
body=json.dumps(params))
def unassign_instance(self, instance_id):
"""
Unassigns a registered instance from all of its layers. The
instance remains in the stack as an unassigned instance and
can be assigned to another layer, as needed. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='UnassignInstance',
body=json.dumps(params))
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
registered with the stack. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='UnassignVolume',
body=json.dumps(params))
def update_app(self, app_id, name=None, description=None,
data_sources=None, type=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Updates a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data sources.
:type type: string
:param type: The app type.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app's virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether SSL is enabled for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instances.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
"""
params = {'AppId': app_id, }
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if type is not None:
params['Type'] = type
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='UpdateApp',
body=json.dumps(params))
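# Usage sketch (illustrative): adds domains and an environment variable to an
# app. The EnvironmentVariable key names (Key/Value/Secure) follow the
# OpsWorks API documentation and are an assumption, not defined here.
#
#     conn.update_app(
#         app_id='APP_ID',
#         domains=['www.example.com', 'example.com'],
#         environment=[{'Key': 'APP_SETTINGS',
#                       'Value': 'production',
#                       'Secure': False}])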
def update_elastic_ip(self, elastic_ip, name=None):
"""
Updates a registered Elastic IP address's name. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The address.
:type name: string
:param name: The new name.
"""
params = {'ElasticIp': elastic_ip, }
if name is not None:
params['Name'] = name
return self.make_request(action='UpdateElasticIp',
body=json.dumps(params))
def update_instance(self, instance_id, layer_ids=None,
instance_type=None, auto_scaling_type=None,
hostname=None, os=None, ami_id=None,
ssh_key_name=None, architecture=None,
install_updates_on_boot=None, ebs_optimized=None):
"""
Updates a specified instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The instance's layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: An Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version, such as `Amazon
Linux 2014.09`. If you set this parameter to `Custom`, you must use
the CreateInstance action's AmiId parameter to specify the custom
AMI that you want to use. For more information on the standard
operating systems, see `Operating Systems`_. For more information on
how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_.
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type architecture: string
:param architecture: The instance architecture. Instance types do not
necessarily support both architectures. For a list of the
architectures that are supported by the different instance types,
see `Instance Families and Types`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or by manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether this is an Amazon EBS-optimized instance.
"""
params = {'InstanceId': instance_id, }
if layer_ids is not None:
params['LayerIds'] = layer_ids
if instance_type is not None:
params['InstanceType'] = instance_type
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if architecture is not None:
params['Architecture'] = architecture
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='UpdateInstance',
body=json.dumps(params))
def update_layer(self, layer_id, name=None, shortname=None,
attributes=None, custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Updates a specified layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorks and by Chef. The short name is also used as the name for
the directory where your app files are installed. It can have a
maximum of 200 characters and must be in the following format:
/\A[a-z0-9\-\_\.]+\Z/.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile to be
used for all of the layer's EC2 instances. For more information
about IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer's
custom security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the
layer's packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to enable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer's custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or by manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
:type lifecycle_event_configuration: dict
:param lifecycle_event_configuration:
"""
params = {'LayerId': layer_id, }
if name is not None:
params['Name'] = name
if shortname is not None:
params['Shortname'] = shortname
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
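# Usage sketch (illustrative): attaches custom Chef recipes to a layer. The
# LayerCustomRecipes keys (Setup, Configure, Deploy, Undeploy, Shutdown)
# follow the OpsWorks API documentation and are an assumption here.
#
#     conn.update_layer(
#         layer_id='LAYER_ID',
#         custom_recipes={'Setup': ['mycookbook::setup'],
#                         'Deploy': ['mycookbook::deploy']},
#         install_updates_on_boot=True)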
def update_my_user_profile(self, ssh_public_key=None):
"""
Updates a user's SSH public key.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
:type ssh_public_key: string
:param ssh_public_key: The user's SSH public key.
"""
params = {}
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
return self.make_request(action='UpdateMyUserProfile',
body=json.dumps(params))
def update_rds_db_instance(self, rds_db_instance_arn, db_user=None,
db_password=None):
"""
Updates an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
if db_user is not None:
params['DbUser'] = db_user
if db_password is not None:
params['DbPassword'] = db_password
return self.make_request(action='UpdateRdsDbInstance',
body=json.dumps(params))
def update_stack(self, stack_id, name=None, attributes=None,
service_role_arn=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None, custom_cookbooks_source=None,
default_ssh_key_name=None,
default_root_device_type=None,
use_opsworks_security_groups=None):
"""
Updates a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type name: string
:param name: The stack's new name.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. For more information about IAM ARNs, see `Using
Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the stack's
current service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
    :param hostname_theme: The stack's new host name theme, with spaces
        replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see CreateStack.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
        format and must escape characters such as '"':
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. For more information,
see `Storage for the Root Device`_.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default.
`UseOpsworksSecurityGroups` allows you to instead provide your own
custom security groups. `UseOpsworksSecurityGroups` has the
following settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
"""
params = {'StackId': stack_id, }
if name is not None:
params['Name'] = name
if attributes is not None:
params['Attributes'] = attributes
if service_role_arn is not None:
params['ServiceRoleArn'] = service_role_arn
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
return self.make_request(action='UpdateStack',
body=json.dumps(params))
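    # Illustrative usage sketch (added for clarity, not part of the original
    # module): the `custom_json` argument described above must be a JSON
    # *string*, so it is usually built with json.dumps. The connection object
    # and stack id below are placeholders.
    #
    #     custom = json.dumps({"key1": "value1", "key2": "value2"})
    #     conn.update_stack('example-stack-id',
    #                       custom_json=custom,
    #                       hostname_theme='Planets_and_Moons')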
def update_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Updates a specified user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's new SSH public key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Managing User Permissions`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='UpdateUserProfile',
body=json.dumps(params))
def update_volume(self, volume_id, name=None, mount_point=None):
"""
Updates an Amazon EBS volume's name or mount point. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type name: string
:param name: The new name.
:type mount_point: string
:param mount_point: The new mount point.
"""
params = {'VolumeId': volume_id, }
if name is not None:
params['Name'] = name
if mount_point is not None:
params['MountPoint'] = mount_point
return self.make_request(action='UpdateVolume',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
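    # Note added for clarity: every public method above funnels through
    # make_request, which speaks the AWS JSON-1.1 protocol -- the action name
    # travels in the 'X-Amz-Target' header as '<TargetPrefix>.<Action>' and
    # the parameters as a JSON body. On a non-200 response the '__type' field
    # of the JSON fault is looked up in self._faults and re-raised as the
    # matching exception class (falling back to self.ResponseError). A
    # caller-side sketch, with a placeholder volume id:
    #
    #     try:
    #         conn.update_volume('vol-12345', name='data')
    #     except conn.ResponseError as exc:
    #         print(exc)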
| mit |
htautau/hhana | mva/plotting/scatter.py | 5 | 13001 |
def draw_scatter(fields,
category,
region,
output_name,
backgrounds,
signals=None,
data=None,
signal_scale=1.,
signal_colors=cm.spring,
classifier=None,
cuts=None,
unblind=False):
nplots = 1
figheight = 6.
figwidth = 6.
background_arrays = []
background_clf_arrays = []
for background in backgrounds:
background_arrays.append(
background.merged_records(
category, region,
fields=fields,
cuts=cuts))
if classifier is not None:
background_clf_arrays.append(
background.scores(
classifier,
category,
region,
cuts=cuts,
systematics=False)['NOMINAL'][0])
if data is not None:
nplots += 1
figwidth += 6.
data_array = data.merged_records(
category, region,
fields=fields,
cuts=cuts)
if classifier is not None:
data_clf_array = data.scores(
classifier,
category,
region,
cuts=cuts)[0]
if signals is not None:
nplots += 1
figwidth += 6.
if data is not None:
signal_index = 3
else:
signal_index = 2
signal_arrays = []
signal_clf_arrays = []
for signal in signals:
signal_arrays.append(
signal.merged_records(
category, region,
fields=fields,
cuts=cuts))
if classifier is not None:
signal_clf_arrays.append(
signal.scores(
classifier,
category,
region,
cuts=cuts,
systematics=False)['NOMINAL'][0])
if classifier is not None:
fields = fields + [classifier]
all_pairs = list(itertools.combinations(fields, 2))
for x, y in all_pairs:
# always make the classifier along the x axis
if not isinstance(y, basestring):
tmp = x
x = y
y = tmp
with_classifier = not isinstance(x, basestring)
plt.figure(figsize=(figwidth, figheight), dpi=200)
axes = []
ax_bkg = plt.subplot(1, nplots, 1)
axes.append(ax_bkg)
if not with_classifier:
xscale = VARIABLES[x].get('scale', 1.)
yscale = VARIABLES[y].get('scale', 1.)
xmin, xmax = float('inf'), float('-inf')
ymin, ymax = float('inf'), float('-inf')
for i, (array, background) in enumerate(zip(background_arrays,
backgrounds)):
if with_classifier:
x_array = background_clf_arrays[i]
else:
x_array = array[x] * xscale
y_array = array[y] * yscale
# update max and min bounds
lxmin, lxmax = x_array.min(), x_array.max()
lymin, lymax = y_array.min(), y_array.max()
if lxmin < xmin:
xmin = lxmin
if lxmax > xmax:
xmax = lxmax
if lymin < ymin:
ymin = lymin
if lymax > ymax:
ymax = lymax
weight = array['weight']
ax_bkg.scatter(
x_array, y_array,
c=background.hist_decor['color'],
label=background.label,
s=weight * 10,
#edgecolors='',
linewidths=1,
marker='o',
alpha=0.75)
if data is not None:
data_ax = plt.subplot(1, nplots, 2)
axes.append(data_ax)
if with_classifier:
x_array = data_clf_array
else:
x_array = data_array[x] * xscale
y_array = data_array[y] * yscale
# if blinded don't show above the midpoint of the BDT score
if with_classifier and not unblind:
midpoint = (x_array.max() + x_array.min()) / 2.
x_array = x_array[data_clf_array < midpoint]
y_array = y_array[data_clf_array < midpoint]
data_ax.text(0.9, 0.2, 'BLINDED',
verticalalignment='center',
horizontalalignment='right',
transform=data_ax.transAxes,
fontsize=20)
# update max and min bounds
lxmin, lxmax = x_array.min(), x_array.max()
lymin, lymax = y_array.min(), y_array.max()
if lxmin < xmin:
xmin = lxmin
if lxmax > xmax:
xmax = lxmax
if lymin < ymin:
ymin = lymin
if lymax > ymax:
ymax = lymax
weight = data_array['weight']
data_ax.scatter(
x_array, y_array,
c='black',
label=data.label,
s=weight * 10,
#edgecolors='',
linewidths=0,
marker='.')
        if signals is not None:
sig_ax = plt.subplot(1, nplots, signal_index)
axes.append(sig_ax)
for i, (array, signal) in enumerate(zip(signal_arrays, signals)):
if with_classifier:
x_array = signal_clf_arrays[i]
else:
x_array = array[x] * xscale
y_array = array[y] * yscale
# update max and min bounds
lxmin, lxmax = x_array.min(), x_array.max()
lymin, lymax = y_array.min(), y_array.max()
if lxmin < xmin:
xmin = lxmin
if lxmax > xmax:
xmax = lxmax
if lymin < ymin:
ymin = lymin
if lymax > ymax:
ymax = lymax
color = signal_colors((i + 1) / float(len(signals) + 1))
weight = array['weight']
sig_ax.scatter(
x_array, y_array,
c=color,
label=signal.label,
s=weight * 10 * signal_scale,
#edgecolors='',
linewidths=0,
marker='o',
alpha=0.75)
xwidth = xmax - xmin
ywidth = ymax - ymin
xpad = xwidth * .1
ypad = ywidth * .1
if with_classifier:
x_name = "BDT Score"
x_filename = "bdt_score"
x_units = None
else:
x_name = VARIABLES[x]['title']
x_filename = VARIABLES[x]['filename']
x_units = VARIABLES[x].get('units', None)
y_name = VARIABLES[y]['title']
y_filename = VARIABLES[y]['filename']
y_units = VARIABLES[y].get('units', None)
for ax in axes:
ax.set_xlim(xmin - xpad, xmax + xpad)
ax.set_ylim(ymin - ypad, ymax + ypad)
ax.legend(loc='upper right')
if x_units is not None:
ax.set_xlabel('%s [%s]' % (x_name, x_units))
else:
ax.set_xlabel(x_name)
if y_units is not None:
ax.set_ylabel('%s [%s]' % (y_name, y_units))
else:
ax.set_ylabel(y_name)
plt.suptitle(category.label)
plt.savefig(os.path.join(PLOTS_DIR, 'scatter_%s_%s_%s%s.png') % (
category.name, x_filename, y_filename, output_name),
bbox_inches='tight')
"""
Romain Madar:
Display the 1D histogram of (x_i - <x>)(y_i - <y>) over the events {i}.
The mean of this distribution will be the "usual correlation", but this
plot also allows one to look at the tails and asymmetry, for data and MC.
"""
def get_2d_field_hist(var):
var_info = VARIABLES[var]
bins = var_info['bins']
min, max = var_info['range']
hist = Hist2D(100, min, max, 100, -1, 1)
return hist
def draw_2d_hist(classifier,
category,
region,
backgrounds,
signals=None,
data=None,
cuts=None,
y=MMC_MASS,
output_suffix=''):
fields = [y]
background_arrays = []
background_clf_arrays = []
for background in backgrounds:
sys_mass = {}
for systematic in iter_systematics(True):
sys_mass[systematic] = (
background.merged_records(
category, region,
fields=fields,
cuts=cuts,
systematic=systematic))
background_arrays.append(sys_mass)
background_clf_arrays.append(
background.scores(
classifier,
category,
region,
cuts=cuts,
systematics=True))
    # initialised unconditionally so the concatenation of background and
    # signal score arrays below also works when no signals are passed
    signal_arrays = []
    signal_clf_arrays = []
    if signals is not None:
for signal in signals:
sys_mass = {}
for systematic in iter_systematics(True):
sys_mass[systematic] = (
signal.merged_records(
category, region,
fields=fields,
cuts=cuts,
systematic=systematic))
signal_arrays.append(sys_mass)
signal_clf_arrays.append(
signal.scores(
classifier,
category,
region,
cuts=cuts,
systematics=True))
xmin, xmax = float('inf'), float('-inf')
if data is not None:
data_array = data.merged_records(
category, region,
fields=fields,
cuts=cuts)
data_clf_array = data.scores(
classifier,
category,
region,
cuts=cuts)[0]
lxmin, lxmax = data_clf_array.min(), data_clf_array.max()
if lxmin < xmin:
xmin = lxmin
if lxmax > xmax:
xmax = lxmax
for array_dict in background_clf_arrays + signal_clf_arrays:
for sys, (array, _) in array_dict.items():
lxmin, lxmax = array.min(), array.max()
if lxmin < xmin:
xmin = lxmin
if lxmax > xmax:
xmax = lxmax
yscale = VARIABLES[y].get('scale', 1.)
if cuts:
output_suffix += '_' + cuts.safe()
output_name = "histos_2d_" + category.name + output_suffix + ".root"
hist_template = get_2d_field_hist(y)
# scale BDT scores such that they are between -1 and 1
xscale = max(abs(xmax), abs(xmin))
with root_open(output_name, 'recreate') as f:
for background, array_dict, clf_dict in zip(backgrounds,
background_arrays,
background_clf_arrays):
for systematic in iter_systematics(True):
x_array = clf_dict[systematic][0] / xscale
y_array = array_dict[systematic][y] * yscale
weight = array_dict[systematic]['weight']
hist = hist_template.Clone(name=background.name +
('_%s' % systematic_name(systematic)))
hist.fill_array(np.c_[y_array, x_array], weights=weight)
hist.Write()
        if signals is not None:
for signal, array_dict, clf_dict in zip(signals,
signal_arrays,
signal_clf_arrays):
for systematic in iter_systematics(True):
x_array = clf_dict[systematic][0] / xscale
y_array = array_dict[systematic][y] * yscale
weight = array_dict[systematic]['weight']
hist = hist_template.Clone(name=signal.name +
('_%s' % systematic_name(systematic)))
hist.fill_array(np.c_[y_array, x_array], weights=weight)
hist.Write()
if data is not None:
x_array = data_clf_array / xscale
y_array = data_array[y] * yscale
weight = data_array['weight']
hist = hist_template.Clone(name=data.name)
hist.fill_array(np.c_[y_array, x_array], weights=weight)
hist.Write()
| gpl-3.0 |
davidbgk/udata | udata/tests/test_linkchecker.py | 1 | 8228 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import mock
from datetime import datetime, timedelta
from udata.tests import TestCase
from udata.core.dataset.factories import DatasetFactory, ResourceFactory
from udata.linkchecker.checker import check_resource
class LinkcheckerTestSettings():
LINKCHECKING_ENABLED = True
LINKCHECKING_IGNORE_DOMAINS = ['example-ignore.com']
LINKCHECKING_MIN_CACHE_DURATION = 0.5
LINKCHECKING_UNAVAILABLE_THRESHOLD = 100
LINKCHECKING_MAX_CACHE_DURATION = 100
class LinkcheckerTest(TestCase):
settings = LinkcheckerTestSettings
def setUp(self):
super(LinkcheckerTest, self).setUp()
self.resource = ResourceFactory()
self.dataset = DatasetFactory(resources=[self.resource])
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_check_resource_no_linkchecker(self, mock_fn):
mock_fn.return_value = None
res = check_resource(self.resource)
self.assertEquals(res, ({'error': 'No linkchecker configured.'}, 503))
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_check_resource_linkchecker_ok(self, mock_fn):
check_res = {'check:status': 200, 'check:available': True,
'check:date': datetime.now()}
class DummyLinkchecker:
def check(self, _):
return check_res
mock_fn.return_value = DummyLinkchecker
res = check_resource(self.resource)
self.assertEquals(res, check_res)
check_res.update({'check:count-availability': 1})
self.assertEquals(self.resource.extras, check_res)
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_check_resource_filter_result(self, mock_fn):
check_res = {'check:status': 200, 'dummy': 'dummy'}
class DummyLinkchecker:
def check(self, _):
return check_res
mock_fn.return_value = DummyLinkchecker
res = check_resource(self.resource)
self.assertEquals(res, check_res)
self.assertNotIn('dummy', self.resource.extras)
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_check_resource_linkchecker_no_status(self, mock_fn):
class DummyLinkchecker:
def check(self, _):
return {'check:available': True}
mock_fn.return_value = DummyLinkchecker
res = check_resource(self.resource)
self.assertEquals(res,
({'error': 'No status in response from linkchecker'},
503))
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_check_resource_linkchecker_check_error(self, mock_fn):
class DummyLinkchecker:
def check(self, _):
return {'check:error': 'ERROR'}
mock_fn.return_value = DummyLinkchecker
res = check_resource(self.resource)
self.assertEquals(res, ({'error': 'ERROR'}, 500))
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_check_resource_linkchecker_in_resource(self, mock_fn):
self.resource.extras['check:checker'] = 'another_linkchecker'
self.resource.save()
check_resource(self.resource)
args, kwargs = mock_fn.call_args
self.assertEquals(args, ('another_linkchecker', ))
def test_check_resource_linkchecker_no_check(self):
self.resource.extras['check:checker'] = 'no_check'
self.resource.save()
res = check_resource(self.resource)
self.assertEquals(res.get('check:status'), 204)
self.assertEquals(res.get('check:available'), True)
def test_check_resource_ignored_domain(self):
self.resource.extras = {}
self.resource.url = 'http://example-ignore.com/url'
self.resource.save()
res = check_resource(self.resource)
self.assertEquals(res.get('check:status'), 204)
self.assertEquals(res.get('check:available'), True)
def test_is_need_check(self):
self.resource.extras = {'check:available': True,
'check:date': datetime.now(),
'check:status': 42}
self.assertFalse(self.resource.need_check())
def test_is_need_check_unknown_status(self):
self.resource.extras = {}
self.assertTrue(self.resource.need_check())
def test_is_need_check_cache_expired(self):
self.resource.extras = {
'check:available': True,
'check:date': datetime.now() - timedelta(seconds=3600),
'check:status': 42
}
self.assertTrue(self.resource.need_check())
def test_is_need_check_count_availability(self):
self.resource.extras = {
# should need a new check after 100 * 30s = 3000s < 3600s
'check:count-availability': 100,
'check:available': True,
'check:date': datetime.now() - timedelta(seconds=3600),
'check:status': 42
}
self.assertTrue(self.resource.need_check())
def test_is_need_check_count_availability_expired(self):
self.resource.extras = {
# should need a new check after 150 * 30s = 4500s > 3600s
'check:count-availability': 150,
'check:available': True,
'check:date': datetime.now() - timedelta(seconds=3600),
'check:status': 42
}
self.assertFalse(self.resource.need_check())
def test_is_need_check_count_availability_unavailable(self):
self.resource.extras = {
# should need a new check after 30s < 3600S
# count-availability is below threshold
'check:count-availability': 95,
'check:available': False,
'check:date': datetime.now() - timedelta(seconds=3600),
'check:status': 42
}
self.assertTrue(self.resource.need_check())
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_count_availability_increment(self, mock_fn):
check_res = {'check:status': 200, 'check:available': True,
'check:date': datetime.now()}
class DummyLinkchecker:
def check(self, _):
return check_res
mock_fn.return_value = DummyLinkchecker
check_resource(self.resource)
self.assertEquals(self.resource.extras['check:count-availability'], 1)
check_resource(self.resource)
self.assertEquals(self.resource.extras['check:count-availability'], 2)
@mock.patch('udata.linkchecker.checker.get_linkchecker')
def test_count_availability_reset(self, mock_fn):
self.resource.extras = {'check:status': 200, 'check:available': True,
'check:date': datetime.now(),
'check:count-availability': 2}
check_res = {'check:status': 200, 'check:available': False,
'check:date': datetime.now()}
class DummyLinkchecker:
def check(self, _):
return check_res
mock_fn.return_value = DummyLinkchecker
check_resource(self.resource)
self.assertEquals(self.resource.extras['check:count-availability'], 1)
def test_count_availability_threshold(self):
self.resource.extras = {
'check:status': 404,
'check:available': False,
# if it weren't above threshold, should need check (>30s)
# and we're still below max_cache 101 * 0.5 < 100
'check:date': datetime.now() - timedelta(seconds=60),
'check:count-availability': 101
}
self.assertFalse(self.resource.need_check())
def test_count_availability_max_cache_duration(self):
self.resource.extras = {
'check:status': 200,
'check:available': True,
# next check should be at 300 * 0.5 = 150min
# but we are above max cache duration 150min > 100min
# and 120m > 100 min so we should need a new check
'check:date': datetime.now() - timedelta(minutes=120),
'check:count-availability': 300
}
self.assertTrue(self.resource.need_check())
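    # Summary (added for clarity) of the caching rule the tests above imply:
    # a resource is re-checked once (now - check:date) exceeds
    # check:count-availability * LINKCHECKING_MIN_CACHE_DURATION minutes,
    # capped at LINKCHECKING_MAX_CACHE_DURATION minutes; the multiplier is
    # only applied while the resource is available, or once the count passes
    # LINKCHECKING_UNAVAILABLE_THRESHOLD.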
| agpl-3.0 |
vishnumani2009/OpenSource-Open-Ended-Statistical-toolkit | FRONTEND/table.py | 12 | 2437 | from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
class Table(QtGui.QDialog):
def __init__(self,parent = None):
QtGui.QDialog.__init__(self, parent)
self.parent = parent
self.initUI()
def initUI(self):
# Rows
rowsLabel = QtGui.QLabel("Rows: ",self)
self.rows = QtGui.QSpinBox(self)
# Columns
colsLabel = QtGui.QLabel("Columns",self)
self.cols = QtGui.QSpinBox(self)
# Cell spacing (distance between cells)
spaceLabel = QtGui.QLabel("Cell spacing",self)
self.space = QtGui.QSpinBox(self)
# Cell padding (distance between cell and inner text)
padLabel = QtGui.QLabel("Cell padding",self)
self.pad = QtGui.QSpinBox(self)
self.pad.setValue(10)
# Button
insertButton = QtGui.QPushButton("Insert",self)
insertButton.clicked.connect(self.insert)
# Layout
layout = QtGui.QGridLayout()
layout.addWidget(rowsLabel,0,0)
layout.addWidget(self.rows,0,1)
layout.addWidget(colsLabel,1,0)
layout.addWidget(self.cols,1,1)
layout.addWidget(padLabel,2,0)
layout.addWidget(self.pad,2,1)
layout.addWidget(spaceLabel,3,0)
layout.addWidget(self.space,3,1)
layout.addWidget(insertButton,4,0,1,2)
self.setWindowTitle("Insert Table")
self.setGeometry(300,300,200,100)
self.setLayout(layout)
def insert(self):
cursor = self.parent.text.textCursor()
# Get the configurations
rows = self.rows.value()
cols = self.cols.value()
if not rows or not cols:
popup = QtGui.QMessageBox(QtGui.QMessageBox.Warning,
"Parameter error",
"Row and column numbers may not be zero!",
QtGui.QMessageBox.Ok,
self)
popup.show()
else:
padding = self.pad.value()
space = self.space.value()
# Set the padding and spacing
fmt = QtGui.QTextTableFormat()
fmt.setCellPadding(padding)
fmt.setCellSpacing(space)
            # Insert the new table
cursor.insertTable(rows,cols,fmt)
self.close()
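# Example usage (illustrative sketch, not part of the original file): the
# dialog expects its parent to expose a QTextEdit-like widget as parent.text,
# because insert() calls self.parent.text.textCursor(). The editor object
# below is a placeholder.
#
#     app = QtGui.QApplication([])
#     editor = QtGui.QMainWindow()
#     editor.text = QtGui.QTextEdit(editor)
#     Table(parent=editor).exec_()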
| gpl-3.0 |
yangleo/cloud-github | openstack_dashboard/dashboards/identity/users/tests.py | 2 | 38692 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox3.mox import IgnoreArg # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
USERS_INDEX_URL = reverse('horizon:identity:users:index')
USER_CREATE_URL = reverse('horizon:identity:users:create')
USER_UPDATE_URL = reverse('horizon:identity:users:update', args=[1])
USER_DETAIL_URL = reverse('horizon:identity:users:detail', args=[1])
USER_CHANGE_PASSWORD_URL = reverse('horizon:identity:users:change_password',
args=[1])
class UsersViewTests(test.BaseAdminViewTests):
def _get_default_domain(self):
domain = {"id": self.request.session.get('domain_context',
None),
"name": self.request.session.get('domain_context_name',
None)}
return api.base.APIDictWrapper(domain)
def _get_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
@test.create_stubs({api.keystone: ('user_list',
'get_effective_domain_id',
'domain_lookup')})
def test_index(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
api.keystone.get_effective_domain_id(IgnoreArg()).AndReturn(domain_id)
api.keystone.user_list(IgnoreArg(),
domain=domain_id).AndReturn(users)
api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
domain.name})
self.mox.ReplayAll()
res = self.client.get(USERS_INDEX_URL)
self.assertTemplateUsed(res, 'identity/users/index.html')
self.assertItemsEqual(res.context['table'].data, users)
if domain_id:
for user in res.context['table'].data:
self.assertItemsEqual(user.domain_id, domain_id)
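    # Note on the pattern used throughout this class (added for clarity):
    # @test.create_stubs swaps the listed api.keystone functions for mox
    # stubs, the expected calls are recorded above, self.mox.ReplayAll()
    # switches mox into replay mode, and the test helpers verify on teardown
    # that the view made every recorded call.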
def test_index_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_index()
@test.create_stubs({api.keystone: ('user_create',
'get_default_domain',
'tenant_list',
'add_tenant_user_role',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
api.keystone.get_default_domain(IgnoreArg()) \
.AndReturn(domain)
api.keystone.get_default_domain(IgnoreArg(), False) \
.AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain.id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=None).AndReturn(
[self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
description=user.description,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.roles_for_user(IgnoreArg(), user.id, self.tenant.id)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'description': user.description,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'enabled': True,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
def test_create_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_create()
@test.create_stubs({api.keystone: ('user_create',
'get_default_domain',
'add_tenant_user_role',
'tenant_list',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create_with_empty_email(self):
user = self.users.get(id="5")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
api.keystone.get_default_domain(IgnoreArg()) \
.AndReturn(domain)
api.keystone.get_default_domain(IgnoreArg(), False) \
.AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain.id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=user.id).AndReturn(
[self.tenants.list(), False])
api.keystone.user_create(IgnoreArg(),
name=user.name,
description=user.description,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id).AndReturn(user)
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()).AndReturn(role)
api.keystone.add_tenant_user_role(IgnoreArg(), self.tenant.id,
user.id, role.id)
api.keystone.roles_for_user(IgnoreArg(), user.id, self.tenant.id)
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'description': user.description,
'email': "",
'enabled': True,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_with_password_mismatch(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain_id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=None).AndReturn(
[self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': "doesntmatch"}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(res, "form", None, ['Passwords do not match.'])
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_short(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain_id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=None).AndReturn(
[self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
# check password min-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'four',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'four'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_long(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain_id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=None).AndReturn(
[self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
        # check password max-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'MoreThanEighteenChars',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'MoreThanEighteenChars'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'user_update_password',
'user_update',
'roles_for_user', )})
def test_update(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest),
domain_id).AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain.id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=user.id).AndReturn(
[self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=user.email,
name=user.name).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': user.description,
'email': user.email,
'project': self.tenant.id}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'user_update_password',
'user_update',
'roles_for_user', )})
def test_update_default_project(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
new_project_id = self.tenants.get(id="3").id
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest),
domain_id).AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain.id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=user.id).AndReturn(
[self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=user.email,
name=user.name,
project=new_project_id).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': user.description,
'email': user.email,
'project': new_project_id}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'user_update',
'roles_for_user', )})
def test_update_with_no_email_attribute(self):
user = self.users.get(id="5")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest),
domain_id).AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain_id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=user.id).AndReturn(
[self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=user.email,
name=user.name,
project=self.tenant.id).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': user.description,
'email': "",
'project': self.tenant.id}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'keystone_can_edit_user',
'roles_for_user', )})
def test_update_with_keystone_can_edit_user_false(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest),
'1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain_id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=user.id).AndReturn(
[self.tenants.list(), False])
api.keystone.keystone_can_edit_user().AndReturn(False)
api.keystone.keystone_can_edit_user().AndReturn(False)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'project': self.tenant.id, }
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
@test.create_stubs({api.keystone: ('user_get',
'user_update_password')})
def test_change_password(self):
user = self.users.get(id="5")
test_password = 'normalpwd'
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.user_update_password(IsA(http.HttpRequest),
user.id,
test_password).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': test_password,
'confirm_password': test_password}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertNoFormErrors(res)
@test.create_stubs({api.keystone: ('user_get',
'user_verify_admin_password')})
@override_settings(ENFORCE_PASSWORD_CHECK=True)
def test_change_password_validation_for_admin_password(self):
user = self.users.get(id="1")
test_password = 'normalpwd'
admin_password = 'secret'
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.user_verify_admin_password(
IsA(http.HttpRequest), admin_password).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': test_password,
'confirm_password': test_password,
'admin_password': admin_password}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertFormError(res, "form", None,
['The admin password is incorrect.'])
@test.create_stubs({api.keystone: ('user_get',)})
def test_update_validation_for_password_too_short(self):
user = self.users.get(id="1")
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
self.mox.ReplayAll()
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': 't',
'confirm_password': 't'}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_get',)})
def test_update_validation_for_password_too_long(self):
user = self.users.get(id="1")
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
self.mox.ReplayAll()
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': 'ThisIsASuperLongPassword',
'confirm_password': 'ThisIsASuperLongPassword'}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
@test.create_stubs({api.keystone: ('user_update_enabled',
'user_list',
'domain_lookup')})
def test_enable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id).AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
True).AndReturn(user)
api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
domain.name})
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled',
'user_list',
'domain_lookup')})
def test_disable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
self.assertTrue(user.enabled)
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(),
user.id,
False).AndReturn(user)
api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
domain.name})
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_update_enabled',
'user_list',
'domain_lookup')})
def test_enable_disable_user_exception(self):
domain = self._get_default_domain()
domain_id = domain.id
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.user_update_enabled(IgnoreArg(), user.id, True) \
.AndRaise(self.exceptions.keystone)
api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
domain.name})
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_list', 'domain_lookup')})
def test_disabling_current_user(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
domain.name})
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You cannot disable the user you are currently '
u'logged in as.')
@test.create_stubs({api.keystone: ('user_list', 'domain_lookup')})
def test_disabling_current_user_domain_name(self):
domain = self._get_default_domain()
domains = self.domains.list()
domain_id = domain.id
users = self._get_users(domain_id)
domain_lookup = dict((d.id, d.name) for d in domains)
for u in users:
u.domain_name = domain_lookup.get(u.domain_id)
for i in range(0, 2):
api.keystone.domain_lookup(IgnoreArg()).AndReturn(domain_lookup)
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
self.mox.ReplayAll()
formData = {'action': 'users__toggle__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You cannot disable the user you are currently '
u'logged in as.')
@test.create_stubs({api.keystone: ('user_list', 'domain_lookup')})
def test_delete_user_with_improper_permissions(self):
domain = self._get_default_domain()
domain_id = domain.id
users = self._get_users(domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.domain_lookup(IgnoreArg()).AndReturn({domain.id:
domain.name})
self.mox.ReplayAll()
formData = {'action': 'users__delete__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You are not allowed to delete user: %s'
% self.request.user.username)
@test.create_stubs({api.keystone: ('user_list', 'domain_lookup')})
def test_delete_user_with_improper_permissions_domain_name(self):
domain = self._get_default_domain()
domains = self.domains.list()
domain_id = domain.id
users = self._get_users(domain_id)
domain_lookup = dict((d.id, d.name) for d in domains)
for u in users:
u.domain_name = domain_lookup.get(u.domain_id)
for i in range(0, 2):
api.keystone.user_list(IgnoreArg(), domain=domain_id) \
.AndReturn(users)
api.keystone.domain_lookup(IgnoreArg()).AndReturn(domain_lookup)
self.mox.ReplayAll()
formData = {'action': 'users__delete__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You are not allowed to delete user: %s'
% self.request.user.username)
@test.create_stubs({api.keystone: ('user_get', 'tenant_get')})
def test_detail_view(self):
user = self.users.get(id="1")
tenant = self.tenants.get(id=user.project_id)
api.keystone.user_get(IsA(http.HttpRequest), '1').AndReturn(user)
api.keystone.tenant_get(IsA(http.HttpRequest), user.project_id) \
.AndReturn(tenant)
self.mox.ReplayAll()
res = self.client.get(USER_DETAIL_URL, args=[user.id])
self.assertTemplateUsed(res, 'identity/users/detail.html')
self.assertEqual(res.context['user'].name, user.name)
self.assertEqual(res.context['user'].id, user.id)
self.assertEqual(res.context['tenant_name'], tenant.name)
@test.create_stubs({api.keystone: ('user_get',)})
def test_detail_view_with_exception(self):
user = self.users.get(id="1")
api.keystone.user_get(IsA(http.HttpRequest), '1').\
AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
res = self.client.get(USER_DETAIL_URL, args=[user.id])
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',)})
def test_get_update_form_init_values(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest),
domain_id).AndReturn(domain)
api.keystone.tenant_list(IgnoreArg(),
domain=domain_id,
user=user.id) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(USER_UPDATE_URL)
# Check that the form contains the default values as initialized by
# the UpdateView
self.assertEqual(res.context['form']['name'].value(), user.name)
self.assertEqual(res.context['form']['email'].value(), user.email)
self.assertEqual(res.context['form']['description'].value(),
user.description)
self.assertEqual(res.context['form']['project'].value(),
user.project_id)
self.assertEqual(res.context['form']['domain_id'].value(),
user.domain_id)
self.assertEqual(res.context['form']['domain_name'].value(),
domain.name)
@test.create_stubs({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update_tenant',
'user_update_password',
'user_update',
'roles_for_user', )})
def test_update_different_description(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(user)
api.keystone.domain_get(IsA(http.HttpRequest),
domain_id).AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=domain.id).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=user.id).AndReturn(
[self.tenants.list(), False])
api.keystone.user_update(IsA(http.HttpRequest),
user.id,
email=user.email,
name=user.name,
description='changed').AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': 'changed',
'email': user.email,
'project': self.tenant.id}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
class SeleniumTests(test.SeleniumAdminTestCase):
def _get_default_domain(self):
domain = {"id": None, "name": None}
return api.base.APIDictWrapper(domain)
@test.create_stubs({api.keystone: ('get_default_domain',
'tenant_list',
'get_default_role',
'role_list',
'user_list',
'domain_lookup')})
def test_modal_create_user_with_passwords_not_matching(self):
domain = self._get_default_domain()
api.keystone.get_default_domain(IgnoreArg()) \
.MultipleTimes().AndReturn(domain)
if api.keystone.VERSIONS.active >= 3:
api.keystone.tenant_list(
IgnoreArg(), domain=None).AndReturn(
[self.tenants.list(), False])
else:
api.keystone.tenant_list(
IgnoreArg(), user=None).AndReturn(
[self.tenants.list(), False])
api.keystone.role_list(IgnoreArg()).AndReturn(self.roles.list())
api.keystone.user_list(IgnoreArg(), domain=None) \
.AndReturn(self.users.list())
api.keystone.domain_lookup(IgnoreArg()).AndReturn({None: None})
api.keystone.get_default_role(IgnoreArg()) \
.AndReturn(self.roles.first())
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url, USERS_INDEX_URL))
# Open the modal menu
self.selenium.find_element_by_id("users__action_create") \
.send_keys("\n")
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_id("id_name"))
self.assertFalse(self._is_element_present("id_confirm_password_error"),
"Password error element shouldn't yet exist.")
self.selenium.find_element_by_id("id_name").send_keys("Test User")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").send_keys("[email protected]")
wait.until(lambda x: self.selenium.find_element_by_id(
"id_confirm_password_error"))
self.assertTrue(self._is_element_present("id_confirm_password_error"),
"Couldn't find password error element.")
@test.create_stubs({api.keystone: ('user_get',)})
def test_update_user_with_passwords_not_matching(self):
api.keystone.user_get(IsA(http.HttpRequest), '1',
admin=True).AndReturn(self.user)
self.mox.ReplayAll()
self.selenium.get("%s%s" % (self.live_server_url,
USER_CHANGE_PASSWORD_URL))
self.assertFalse(self._is_element_present("id_confirm_password_error"),
"Password error element shouldn't yet exist.")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_name").click()
self.assertTrue(self._is_element_present("id_confirm_password_error"),
"Couldn't find password error element.")
def _is_element_present(self, element_id):
try:
self.selenium.find_element_by_id(element_id)
return True
except Exception:
return False
| apache-2.0 |
hailinzeng/infer | infer/lib/capture/javac.py | 19 | 1176 | # Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import subprocess
import traceback
import util
import inferlib
MODULE_NAME = __name__
MODULE_DESCRIPTION = '''Run analysis of code built with a command like:
javac <options> <source files>
Analysis examples:
infer -- javac srcfile.java'''
def gen_instance(*args):
return JavacCapture(*args)
# This creates an empty argparser for the module, which provides only
# description/usage information and no arguments.
create_argparser = util.base_argparser(MODULE_DESCRIPTION, MODULE_NAME)
class JavacCapture:
def __init__(self, args, cmd):
self.analysis = inferlib.Infer(args, cmd[1:])
def capture(self):
try:
self.analysis.start()
return os.EX_OK
except subprocess.CalledProcessError as exc:
if self.analysis.args.debug:
traceback.print_exc()
return exc.returncode
| bsd-3-clause |
devdelay/home-assistant | homeassistant/components/sensor/cpuspeed.py | 11 | 1969 | """
Support for displaying the current CPU speed.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.cpuspeed/
"""
import logging
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['py-cpuinfo==0.2.3']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "CPU speed"
ATTR_VENDOR = 'Vendor ID'
ATTR_BRAND = 'Brand'
ATTR_HZ = 'GHz Advertised'
ICON = 'mdi:pulse'
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the CPU speed sensor."""
add_devices([CpuSpeedSensor(config.get('name', DEFAULT_NAME))])
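# Example configuration.yaml entry (illustrative; only `name` is read above
# and it is optional, falling back to DEFAULT_NAME):
#
#     sensor:
#       - platform: cpuspeed
#         name: CPU speed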
class CpuSpeedSensor(Entity):
"""Representation a CPU sensor."""
def __init__(self, name):
"""Initialize the sensor."""
self._name = name
self._state = None
self._unit_of_measurement = 'GHz'
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self.info is not None:
return {
ATTR_VENDOR: self.info['vendor_id'],
ATTR_BRAND: self.info['brand'],
ATTR_HZ: round(self.info['hz_advertised_raw'][0]/10**9, 2)
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the state."""
from cpuinfo import cpuinfo
self.info = cpuinfo.get_cpu_info()
self._state = round(float(self.info['hz_actual_raw'][0])/10**9, 2)
| mit |
Jusedawg/SickRage | lib/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should not
        # be different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
| gpl-3.0 |
vathpela/blivet | tests/vmtests/runvmtests.py | 2 | 6803 | import argparse
import libvirt
import paramiko
import sys
import time
from contextlib import contextmanager
TESTS = ["tests.vmtests.blivet_reset_vmtest.LVMTestCase",
"tests.vmtests.blivet_reset_vmtest.LVMSnapShotTestCase",
"tests.vmtests.blivet_reset_vmtest.LVMThinpTestCase",
"tests.vmtests.blivet_reset_vmtest.LVMThinSnapShotTestCase",
"tests.vmtests.blivet_reset_vmtest.LVMRaidTestCase",
"tests.vmtests.blivet_reset_vmtest.MDRaid0TestCase",
"tests.vmtests.blivet_reset_vmtest.LVMOnMDTestCase"]
SNAP_NAME = "snapshot"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--repo", type=str, help="Git repo with tests", required=True)
parser.add_argument("--branch", type=str, help="Git branch to test", required=True)
parser.add_argument("--connection", type=str, help="Libvirt connection URI", required=True)
parser.add_argument("--name", type=str, help="Name of the virtual machine", required=True)
parser.add_argument("--ip", type=str, help="IP adress of the virtual machine", required=True)
parser.add_argument("--vmpass", type=str, help="Root passphrase for the virtual machine", required=True)
parser.add_argument("--virtpass", type=str, help="Root passphrase for the libvirt host", required=False)
args = parser.parse_args()
return args
def request_cred(credentials, cmd_args):
for credential in credentials:
if credential[0] == libvirt.VIR_CRED_AUTHNAME:
credential[4] = "root"
elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
credential[4] = cmd_args.virtpass
return 0
@contextmanager
def virtual_machine(cmd_args):
auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], request_cred, None]
try:
conn = libvirt.openAuth(cmd_args.connection, auth, 0)
except libvirt.libvirtError as e:
raise RuntimeError("Failed to open connection:\n%s", str(e))
try:
dom = conn.lookupByName(cmd_args.name)
except libvirt.libvirtError:
raise RuntimeError("Virtual machine %s not found", cmd_args.name)
snapshots = dom.snapshotListNames()
if SNAP_NAME in snapshots:
try:
snap = dom.snapshotLookupByName(SNAP_NAME)
snap.delete()
except libvirt.libvirtError as e:
raise RuntimeError("Failed to delete snapshot:\n %s", str(e))
# start the VM
try:
dom.create()
except libvirt.libvirtError as e:
raise RuntimeError("Failed to start virtual machine:%s", str(e))
# wait for virtual machine to boot and create snapshot
time.sleep(120)
with ssh_connection(cmd_args):
try:
snap_xml = "<domainsnapshot><name>%s</name></domainsnapshot>" % SNAP_NAME
dom.snapshotCreateXML(snap_xml)
except libvirt.libvirtError as e:
raise RuntimeError("Failed to create snapshot:\n%s.", str(e))
yield dom
# stop the VM
try:
dom.destroy()
except libvirt.libvirtError as e:
raise RuntimeError("Failed to stop virtual machine:%s", str(e))
# remove the snapshot
try:
snap = dom.snapshotLookupByName(SNAP_NAME)
snap.delete()
except libvirt.libvirtError as e:
raise RuntimeError("Failed to delete snapshot:\n %s", str(e))
@contextmanager
def ssh_connection(cmd_args):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(cmd_args.ip, username="root", password=cmd_args.virtpass)
except paramiko.AuthenticationException:
raise RuntimeError("Authentication failed while trying to connect to virtual machine.")
yield ssh
ssh.close()
def run_tests(cmd_args):
""" Run tests in the VM
:param cmd_args: parsed args from command line
"""
with virtual_machine(cmd_args) as virt:
test_results = []
fails = errors = skips = 0
for test in TESTS:
with ssh_connection(cmd_args) as ssh:
# clone the repository with tests
_stdin, stdout, stderr = ssh.exec_command("git clone %s" % cmd_args.repo)
if stdout.channel.recv_exit_status() != 0:
raise RuntimeError("Failed to clone test repository.")
# switch to selected branch
_stdin, stdout, stderr = ssh.exec_command("cd blivet && git checkout %s" % cmd_args.branch)
if stdout.channel.recv_exit_status() != 0:
raise RuntimeError("Failed to switch to brach %s.\nOutput:\n%s\n%s" %
(cmd_args.branch, stdout.read().decode("utf-8"),
stderr.read().decode("utf-8")))
# run the tests
cmd = "export VM_ENVIRONMENT=1 && cd blivet && \
PYTHONPATH=. python3 -m unittest %s" % test
_stdin, stdout, stderr = ssh.exec_command(cmd)
out = stdout.read().decode("utf-8")
err = stderr.read().decode("utf-8")
ret = stdout.channel.recv_exit_status()
print(out)
print(err)
# save the result
if ret != 0:
if "failures=" in err:
test_results.append((test, "FAILED"))
fails += 1
elif "errors=" in err:
test_results.append((test, "ERROR"))
errors += 1
else:
if "skipped=" in err:
test_results.append((test, "SKIPPED"))
skips += 1
else:
test_results.append((test, "OK"))
# revert to snapshot
try:
snap = virt.snapshotLookupByName(SNAP_NAME)
virt.revertToSnapshot(snap)
except libvirt.libvirtError as e:
raise RuntimeError("Failed to revert to snapshot:\n %s", str(e))
# print combined result of all tests
print("======================================================================")
for result in test_results:
print("%s: %s" % result)
print("----------------------------------------------------------------------")
print("Ran %d tests. %d failures, %d errors, %d skipped." % (len(test_results),
fails, errors, skips))
print("======================================================================")
return 0 if (fails + errors) == 0 else 1
def main():
cmd_args = parse_args()
ret = run_tests(cmd_args)
sys.exit(ret)
if __name__ == "__main__":
main()
| gpl-2.0 |
HEG-Arc/Appagoo | appagoo/allauth/socialaccount/providers/windowslive/views.py | 65 | 1498 | from __future__ import unicode_literals
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import WindowsLiveProvider
class WindowsLiveOAuth2Adapter(OAuth2Adapter):
provider_id = WindowsLiveProvider.id
access_token_url = 'https://login.live.com/oauth20_token.srf'
authorize_url = 'https://login.live.com/oauth20_authorize.srf'
profile_url = 'https://apis.live.net/v5.0/me'
def complete_login(self, request, app, token, **kwargs):
headers = {'Authorization': 'Bearer {0}'.format(token.token)}
resp = requests.get(self.profile_url, headers=headers)
        # example of what's returned (in python format):
#{'first_name': 'James', 'last_name': 'Smith',
# 'name': 'James Smith', 'locale': 'en_US', 'gender': None,
# 'emails': {'personal': None, 'account': '[email protected]',
# 'business': None, 'preferred': '[email protected]'},
# 'link': 'https://profile.live.com/',
# 'updated_time': '2014-02-07T00:35:27+0000',
# 'id': '83605e110af6ff98'}
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth2_login = OAuth2LoginView.adapter_view(WindowsLiveOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(WindowsLiveOAuth2Adapter)
| bsd-3-clause |
Anonymous-X6/django | django/contrib/postgres/forms/hstore.py | 313 | 1484 | import json
from django import forms
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(forms.CharField):
"""A field for HStore data which accepts JSON input."""
widget = forms.Textarea
default_error_messages = {
'invalid_json': _('Could not load JSON data.'),
}
def prepare_value(self, value):
if isinstance(value, dict):
return json.dumps(value)
return value
def to_python(self, value):
if not value:
return {}
if not isinstance(value, dict):
try:
value = json.loads(value)
except ValueError:
raise ValidationError(
self.error_messages['invalid_json'],
code='invalid_json',
)
# Cast everything to strings for ease.
for key, val in value.items():
value[key] = six.text_type(val)
return value
def has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
        # For purposes of seeing whether something has changed, None is
        # the same as an empty dict; if the data or initial value we get
        # is None, replace it with {}.
initial_value = self.to_python(initial)
return super(HStoreField, self).has_changed(initial_value, data)
| bsd-3-clause |
squishbug/DataScienceProgramming | DataScienceProgramming/Examples/top_hashtags.py | 3 | 1098 | #!/usr/bin/env python
import atexit
import os
import platform
import sys
os.environ["SPARK_HOME"] = '/usr/hdp/2.4.2.0-258/spark'
spark_home = os.environ.get('SPARK_HOME', None)
if not spark_home:
raise ValueError('SPARK_HOME environment variable is not set')
sys.path.insert(0, os.path.join(spark_home, 'python'))
sys.path.insert(0, os.path.join(spark_home, 'python/lib/py4j-0.8.1-src.zip'))
execfile(os.path.join(spark_home, 'python/pyspark/shell.py'))
###sys.path.append('/usr/hdp/2.4.2.0-258/spark/python')
import py4j
import pyspark
from pyspark.context import SparkContext
from pyspark.sql import SQLContext, HiveContext
from pyspark.storagelevel import StorageLevel
import json
def extract_hash(tw):
try:
return json.loads(tw)['entities']['hashtags']
except:
return ()
tweets = sc.textFile("/user/molnar/data/election2012/cache-*.json.gz")
hashtags = tweets.flatMap(extract_hash).map(lambda x: (x['text'], 1))
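# sum the per-hashtag counts and keep the 20 most frequent tags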
topcounts = hashtags.reduceByKey(lambda a, b: a+b)
res = topcounts.map(lambda (a,b): (b, a)).sortByKey(0,1).take(20)
print '\n'.join('%s: %d' % (tag, count) for count, tag in res)
| cc0-1.0 |
maas/maas | src/metadataserver/builtin_scripts/testing_scripts/fio.py | 1 | 6765 | #!/usr/bin/env python3
#
# fio - Run fio on supplied drive.
#
# Author: Newell Jensen <[email protected]>
# Lee Trager <[email protected]>
#
# Copyright (C) 2017-2020 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# --- Start MAAS 1.0 script metadata ---
# name: fio
# title: Storage benchmark
# description: Run Fio benchmarking against selected storage devices.
# tags: storage
# script_type: testing
# hardware_type: storage
# parallel: instance
# results:
# random_read:
# title: Random read
# description: Read speed when reading randomly from the disk.
# random_read_iops:
# title: Random read IOPS
# description: IOPS when reading randomly from the disk.
# sequential_read:
# title: Sequential read
#     description: Read speed when reading sequentially from the disk.
# sequential_read_iops:
# title: Sequential read IOPS
#     description: IOPS when reading sequentially from the disk.
# random_write:
# title: Random write
#     description: Write speed when writing randomly to the disk.
# random_write_iops:
# title: Random write IOPS
#     description: IOPS when writing randomly to the disk.
# sequential_write:
# title: Sequential write
#     description: Write speed when writing sequentially to the disk.
# sequential_write_iops:
# title: Sequential write IOPS
#     description: IOPS when writing sequentially to the disk.
# parameters:
# storage:
# type: storage
# argument_format: '{path}'
# packages: {apt: fio}
# destructive: true
# --- End MAAS 1.0 script metadata ---
import argparse
from copy import deepcopy
import json
import os
import re
from subprocess import CalledProcessError, check_output
import sys
import yaml
# When given --output-format=normal,json fio > 3 outputs both normal
# and json format. Older versions just output the normal format.
CMD = [
"sudo",
"-n",
"fio",
"--randrepeat=1",
"--ioengine=libaio",
"--direct=1",
"--gtod_reduce=1",
"--name=fio_test",
"--iodepth=64",
"--size=4G",
"--output-format=normal,json",
]
REGEX = re.compile(
r"""
(
# fio-3+ outputs both formats, this regex pulls out the JSON.
(?P<pre_output>[^\{]*)(?P<json>^{.*^}$\n)(?P<post_output>.*)
) | (
# fio < 3 will only output the normal output. Search for the
# values we need.
(
^\s+(read\s*:|write:).*
bw=(?P<bw>.+)(?P<bw_unit>[KMG]B/s),.*iops=(?P<iops>\d+)
)
)
""",
re.MULTILINE | re.DOTALL | re.VERBOSE,
)
def get_blocksize(blockdevice):
"""Return the block size of the block device."""
blockname = os.path.basename(blockdevice)
with open("/sys/block/%s/queue/physical_block_size" % blockname, "r") as f:
return int(f.read())
def run_cmd(readwrite, result_break=True):
"""Execute `CMD` and return output or exit if error."""
cmd = deepcopy(CMD)
cmd.append("--readwrite=%s" % readwrite)
print("Running command: %s\n" % " ".join(cmd))
try:
stdout = check_output(cmd)
except CalledProcessError as e:
sys.stderr.write("fio failed to run!\n")
sys.stdout.write(e.stdout.decode())
if e.stderr is not None:
sys.stderr.write(e.stderr.decode())
sys.exit(e.returncode)
stdout = stdout.decode()
match = REGEX.search(stdout)
if match is not None:
regex_results = match.groupdict()
else:
regex_results = {}
if regex_results["json"] is not None:
# fio >= 3 - Only print the output, parse the JSON.
full_output = ""
for output in ["pre_output", "post_output"]:
if regex_results[output] is not None:
full_output += regex_results[output].strip()
print(full_output)
fio_dict = json.loads(regex_results["json"])["jobs"][0][
"read" if "read" in readwrite else "write"
]
results = {"bw": int(fio_dict["bw"]), "iops": int(fio_dict["iops"])}
else:
        # fio < 3 - Print the output; the regex should have found the results.
print(stdout)
bw = regex_results.get("bw")
if bw is not None:
# JSON output in fio >= 3 always returns bw in KB/s. Normalize here
# so units are always the same.
multiplier = {"KB/s": 1, "MB/s": 1000, "GB/s": 1000 * 1000}
bw = int(float(bw) * multiplier[regex_results["bw_unit"]])
iops = regex_results.get("iops")
if iops is not None:
iops = int(iops)
results = {"bw": bw, "iops": iops}
if result_break:
print("\n%s\n" % str("-" * 80))
return results
def run_fio(blockdevice):
"""Execute fio tests for supplied storage device.
Performs random and sequential read and write tests.
"""
CMD.append("--filename=%s" % blockdevice)
CMD.append("--bs=%s" % get_blocksize(blockdevice))
random_read = run_cmd("randread")
sequential_read = run_cmd("read")
random_write = run_cmd("randwrite")
sequential_write = run_cmd("write", False)
# Write out YAML file if RESULT_PATH is set.
result_path = os.environ.get("RESULT_PATH")
if result_path is not None:
results = {
"results": {
"random_read": "%s KB/s" % random_read["bw"],
"random_read_iops": random_read["iops"],
"sequential_read": "%s KB/s" % sequential_read["bw"],
"sequential_read_iops": sequential_read["iops"],
"random_write": "%s KB/s" % random_write["bw"],
"random_write_iops": random_write["iops"],
"sequential_write": "%s KB/s" % sequential_write["bw"],
"sequential_write_iops": sequential_write["iops"],
}
}
with open(result_path, "w") as results_file:
yaml.safe_dump(results, results_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Fio Hardware Testing.")
parser.add_argument(
"blockdevice", help="The blockdevice you want to test. e.g. /dev/sda"
)
args = parser.parse_args()
sys.exit(run_fio(args.blockdevice))
| agpl-3.0 |
elopezga/ErrorRate | ivi/tektronix/tektronixAWG2000.py | 6 | 23133 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import struct
from numpy import *
from .. import ivi
from .. import fgen
StandardWaveformMapping = {
'sine': 'sin',
'square': 'squ',
'triangle': 'tri',
'ramp_up': 'ramp',
#'ramp_down',
#'dc'
}
class tektronixAWG2000(ivi.Driver, fgen.Base, fgen.StdFunc, fgen.ArbWfm,
fgen.ArbSeq, fgen.SoftwareTrigger, fgen.Burst,
fgen.ArbChannelWfm):
"Tektronix AWG2000 series arbitrary waveform generator driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(tektronixAWG2000, self).__init__(*args, **kwargs)
self._output_count = 1
self._arbitrary_sample_rate = 0
self._arbitrary_waveform_number_waveforms_max = 0
self._arbitrary_waveform_size_max = 256*1024
self._arbitrary_waveform_size_min = 64
self._arbitrary_waveform_quantum = 8
self._arbitrary_sequence_number_sequences_max = 0
self._arbitrary_sequence_loop_count_max = 0
self._arbitrary_sequence_length_max = 0
self._arbitrary_sequence_length_min = 0
self._catalog_names = list()
self._arbitrary_waveform_n = 0
self._arbitrary_sequence_n = 0
self._identity_description = "Tektronix AWG2000 series arbitrary waveform generator driver"
self._identity_identifier = ""
self._identity_revision = ""
self._identity_vendor = ""
self._identity_instrument_manufacturer = "Tektronix"
self._identity_instrument_model = ""
self._identity_instrument_firmware_revision = ""
self._identity_specification_major_version = 5
self._identity_specification_minor_version = 0
self._identity_supported_instrument_models = ['AWG2005','AWG2020','AWG2021','AWG2040','AWG2041']
self._init_outputs()
def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
"Opens an I/O session to the instrument."
super(tektronixAWG2000, self)._initialize(resource, id_query, reset, **keywargs)
# interface clear
if not self._driver_operation_simulate:
self._clear()
# check ID
if id_query and not self._driver_operation_simulate:
id = self.identity.instrument_model
id_check = self._instrument_id
id_short = id[:len(id_check)]
if id_short != id_check:
raise Exception("Instrument ID mismatch, expecting %s, got %s", id_check, id_short)
# reset
if reset:
self.utility_reset()
def _load_id_string(self):
if self._driver_operation_simulate:
self._identity_instrument_manufacturer = "Not available while simulating"
self._identity_instrument_model = "Not available while simulating"
self._identity_instrument_firmware_revision = "Not available while simulating"
else:
lst = self._ask("*IDN?").split(",")
self._identity_instrument_manufacturer = lst[0]
self._identity_instrument_model = lst[1]
self._identity_instrument_firmware_revision = lst[3]
self._set_cache_valid(True, 'identity_instrument_manufacturer')
self._set_cache_valid(True, 'identity_instrument_model')
self._set_cache_valid(True, 'identity_instrument_firmware_revision')
def _get_identity_instrument_manufacturer(self):
if self._get_cache_valid():
return self._identity_instrument_manufacturer
self._load_id_string()
return self._identity_instrument_manufacturer
def _get_identity_instrument_model(self):
if self._get_cache_valid():
return self._identity_instrument_model
self._load_id_string()
return self._identity_instrument_model
def _get_identity_instrument_firmware_revision(self):
if self._get_cache_valid():
return self._identity_instrument_firmware_revision
self._load_id_string()
return self._identity_instrument_firmware_revision
def _utility_disable(self):
pass
def _utility_error_query(self):
error_code = 0
error_message = "No error"
if not self._driver_operation_simulate:
error_code, error_message = self._ask(":evmsg?").split(',')
error_code = int(error_code.split(' ', 1)[1])
if error_code == 1:
self._ask("*esr?")
error_code, error_message = self._ask(":evmsg?").split(',')
error_code = int(error_code.split(' ', 1)[1])
error_message = error_message.strip(' "')
return (error_code, error_message)
def _utility_lock_object(self):
pass
def _utility_reset(self):
if not self._driver_operation_simulate:
self._write("*RST")
self.driver_operation.invalidate_all_attributes()
def _utility_reset_with_defaults(self):
self._utility_reset()
def _utility_self_test(self):
code = 0
message = "Self test passed"
if not self._driver_operation_simulate:
self._write("*TST?")
# wait for test to complete
time.sleep(60)
code = int(self._read())
if code != 0:
message = "Self test failed"
return (code, message)
def _utility_unlock_object(self):
pass
def _init_outputs(self):
try:
super(tektronixAWG2000, self)._init_outputs()
except AttributeError:
pass
self._output_enabled = list()
for i in range(self._output_count):
self._output_enabled.append(False)
def _load_catalog(self):
self._catalog = list()
self._catalog_names = list()
if not self._driver_operation_simulate:
raw = self._ask(":memory:catalog:all?").lower()
raw = raw.split(' ', 1)[1]
l = raw.split(',')
l = [s.strip('"') for s in l]
self._catalog = [l[i:i+3] for i in range(0, len(l), 3)]
self._catalog_names = [l[0] for l in self._catalog]
def _get_output_operation_mode(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_operation_mode[index]
def _set_output_operation_mode(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in OperationMode:
raise ivi.ValueNotSupportedException()
self._output_operation_mode[index] = value
def _get_output_enabled(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":output:ch%d:state?" % (index+1)).split(' ', 1)[1]
self._output_enabled[index] = bool(int(resp))
self._set_cache_valid(index=index)
return self._output_enabled[index]
def _set_output_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
if not self._driver_operation_simulate:
self._write(":output:ch%d:state %d" % (index+1, value))
self._output_enabled[index] = value
self._set_cache_valid(index=index)
def _get_output_impedance(self, index):
index = ivi.get_index(self._output_name, index)
self._output_impedance[index] = 50
return self._output_impedance[index]
def _set_output_impedance(self, index, value):
index = ivi.get_index(self._output_name, index)
value = 50
self._output_impedance[index] = value
def _get_output_mode(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":fg:state?").split(' ', 1)[1]
if int(resp):
self._output_mode[index] = 'function'
else:
self._output_mode[index] = 'arbitrary'
self._set_cache_valid(index=index)
return self._output_mode[index]
def _set_output_mode(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in fgen.OutputMode:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
if value == 'function':
self._write(":fg:state 1")
elif value == 'arbitrary':
self._write(":fg:state 0")
self._output_mode[index] = value
for k in range(self._output_count):
self._set_cache_valid(valid=False,index=k)
self._set_cache_valid(index=index)
def _get_output_reference_clock_source(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":clock:source?").split(' ', 1)[1]
value = resp.lower()
self._output_reference_clock_source[index] = value
self._set_cache_valid(index=index)
return self._output_reference_clock_source[index]
def _set_output_reference_clock_source(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in fgen.SampleClockSource:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":clock:source %s" % value)
self._output_reference_clock_source[index] = value
for k in range(self._output_count):
self._set_cache_valid(valid=False,index=k)
self._set_cache_valid(index=index)
def abort_generation(self):
pass
def initiate_generation(self):
pass
def _get_output_standard_waveform_amplitude(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":fg:ch%d:amplitude?" % (index+1)).split(' ', 1)[1]
self._output_standard_waveform_amplitude[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_amplitude[index]
def _set_output_standard_waveform_amplitude(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":fg:ch%d:amplitude %e" % (index+1, value))
self._output_standard_waveform_amplitude[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_dc_offset(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":fg:ch%d:offset?" % (index+1)).split(' ', 1)[1]
self._output_standard_waveform_dc_offset[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_dc_offset[index]
def _set_output_standard_waveform_dc_offset(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":fg:ch%d:offset %e" % (index+1, value))
self._output_standard_waveform_dc_offset[index] = value
self._set_cache_valid(index=index)
def _get_output_standard_waveform_duty_cycle_high(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_standard_waveform_duty_cycle_high[index]
def _set_output_standard_waveform_duty_cycle_high(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_standard_waveform_duty_cycle_high[index] = value
def _get_output_standard_waveform_start_phase(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_standard_waveform_start_phase[index]
def _set_output_standard_waveform_start_phase(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_standard_waveform_start_phase[index] = value
def _get_output_standard_waveform_frequency(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":fg:frequency?").split(' ', 1)[1]
self._output_standard_waveform_frequency[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_standard_waveform_frequency[index]
def _set_output_standard_waveform_frequency(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":fg:frequency %e" % value)
self._output_standard_waveform_frequency[index] = value
for k in range(self._output_count):
self._set_cache_valid(valid=False,index=k)
self._set_cache_valid(index=index)
def _get_output_standard_waveform_waveform(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":fg:ch%d:shape?" % (index+1)).split(' ', 1)[1]
value = resp.lower()
value = [k for k,v in StandardWaveformMapping.items() if v==value][0]
self._output_standard_waveform_waveform[index] = value
self._set_cache_valid(index=index)
return self._output_standard_waveform_waveform[index]
def _set_output_standard_waveform_waveform(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in StandardWaveformMapping:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":fg:ch%d:shape %s" % (index+1, StandardWaveformMapping[value]))
self._output_standard_waveform_waveform[index] = value
self._set_cache_valid(index=index)
def _get_output_arbitrary_gain(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":ch%d:amplitude?" % (index+1)).split(' ', 1)[1]
self._output_arbitrary_gain[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_arbitrary_gain[index]
def _set_output_arbitrary_gain(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":ch%d:amplitude %e" % (index+1, value))
self._output_arbitrary_gain[index] = value
self._set_cache_valid(index=index)
def _get_output_arbitrary_offset(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":ch%d:offset?" % (index+1)).split(' ', 1)[1]
self._output_arbitrary_offset[index] = float(resp)
self._set_cache_valid(index=index)
return self._output_arbitrary_offset[index]
def _set_output_arbitrary_offset(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if not self._driver_operation_simulate:
self._write(":ch%d:offset %e" % (index+1, value))
self._output_arbitrary_offset[index] = value
self._set_cache_valid(index=index)
def _get_output_arbitrary_waveform(self, index):
index = ivi.get_index(self._output_name, index)
if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
resp = self._ask(":ch%d:waveform?" % (index+1)).split(' ', 1)[1]
self._output_arbitrary_waveform[index] = resp.strip('"').lower()
self._set_cache_valid(index=index)
return self._output_arbitrary_waveform[index]
def _set_output_arbitrary_waveform(self, index, value):
index = ivi.get_index(self._output_name, index)
value = str(value).lower()
# extension must be wfm
ext = value.split('.').pop()
if ext != 'wfm':
raise ivi.ValueNotSupportedException()
# waveform must exist on arb
self._load_catalog()
if value not in self._catalog_names:
raise ivi.ValueNotSupportedException()
if not self._driver_operation_simulate:
self._write(":ch%d:waveform \"%s\"" % (index+1, value))
self._output_arbitrary_waveform[index] = value
def _get_arbitrary_sample_rate(self):
if not self._driver_operation_simulate and not self._get_cache_valid():
resp = self._ask(":clock:frequency?").split(' ', 1)[1]
self._arbitrary_sample_rate = float(resp)
self._set_cache_valid()
return self._arbitrary_sample_rate
def _set_arbitrary_sample_rate(self, value):
value = float(value)
if not self._driver_operation_simulate:
self._write(":clock:frequency %e" % value)
self._arbitrary_sample_rate = value
self._set_cache_valid()
def _get_arbitrary_waveform_number_waveforms_max(self):
return self._arbitrary_waveform_number_waveforms_max
def _get_arbitrary_waveform_size_max(self):
return self._arbitrary_waveform_size_max
def _get_arbitrary_waveform_size_min(self):
return self._arbitrary_waveform_size_min
def _get_arbitrary_waveform_quantum(self):
return self._arbitrary_waveform_quantum
def _arbitrary_waveform_clear(self, handle):
pass
def _arbitrary_waveform_create(self, data):
y = None
x = None
if type(data) == list and type(data[0]) == float:
# list
y = array(data)
elif type(data) == ndarray and len(data.shape) == 1:
# 1D array
y = data
elif type(data) == ndarray and len(data.shape) == 2 and data.shape[0] == 1:
            # 2D array, height 1
y = data[0]
elif type(data) == ndarray and len(data.shape) == 2 and data.shape[1] == 1:
# 2D array, width 1
y = data[:,0]
else:
x, y = ivi.get_sig(data)
if x is None:
x = arange(0,len(y)) / 10e6
if len(y) % self._arbitrary_waveform_quantum != 0:
raise ivi.ValueNotSupportedException()
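        # estimate the sample interval as the RMS of successive differences in the time vector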
xincr = ivi.rms(diff(x))
# get unused handle
self._load_catalog()
have_handle = False
while not have_handle:
self._arbitrary_waveform_n += 1
handle = "w%04d.wfm" % self._arbitrary_waveform_n
have_handle = handle not in self._catalog_names
self._write(":data:destination \"%s\"" % handle)
self._write(":wfmpre:bit_nr 12")
self._write(":wfmpre:bn_fmt rp")
self._write(":wfmpre:byt_nr 2")
self._write(":wfmpre:byt_or msb")
self._write(":wfmpre:encdg bin")
self._write(":wfmpre:pt_fmt y")
self._write(":wfmpre:yzero 0")
self._write(":wfmpre:ymult %e" % (2/(1<<12)))
self._write(":wfmpre:xincr %e" % xincr)
raw_data = b''
for f in y:
# clip at -1 and 1
if f > 1.0: f = 1.0
if f < -1.0: f = -1.0
f = (f + 1) / 2
# scale to 12 bits
i = int(f * ((1 << 12) - 2) + 0.5) & 0x000fffff
# add to raw data, MSB first
raw_data = raw_data + struct.pack('>H', i)
self._write_ieee_block(raw_data, ':curve ')
return handle
def _get_arbitrary_sequence_number_sequences_max(self):
return self._arbitrary_sequence_number_sequences_max
def _get_arbitrary_sequence_loop_count_max(self):
return self._arbitrary_sequence_loop_count_max
def _get_arbitrary_sequence_length_max(self):
return self._arbitrary_sequence_length_max
def _get_arbitrary_sequence_length_min(self):
return self._arbitrary_sequence_length_min
def _arbitrary_clear_memory(self):
pass
def _arbitrary_sequence_clear(self, handle):
pass
def _arbitrary_sequence_configure(self, index, handle, gain, offset):
pass
def _arbitrary_sequence_create(self, handle_list, loop_count_list):
return "handle"
def send_software_trigger(self):
if not self._driver_operation_simulate:
self._write("*TRG")
def _get_output_burst_count(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_burst_count[index]
def _set_output_burst_count(self, index, value):
index = ivi.get_index(self._output_name, index)
value = int(value)
self._output_burst_count[index] = value
def _arbitrary_waveform_create_channel_waveform(self, index, data):
handle = self._arbitrary_waveform_create(data)
self._set_output_arbitrary_waveform(index, handle)
return handle
| mit |
mvo5/snapcraft | snapcraft/internal/build_providers/errors.py | 1 | 8705 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import shlex
from typing import Any, Dict, Optional
from typing import Sequence # noqa: F401
from snapcraft.internal.errors import SnapcraftError as _SnapcraftError
class ProviderBaseError(_SnapcraftError):
"""Base Exception for all provider related exceptions."""
class ProviderNotSupportedError(ProviderBaseError):
fmt = (
"The {provider!r} provider is not supported, please choose a "
"different one and try again."
)
def __init__(self, *, provider: str) -> None:
super().__init__(provider=provider)
class ProviderNotFound(ProviderBaseError):
fmt = "You need {provider!r} set-up to build snaps: {error_message}."
def __init__(
self, *, provider: str, prompt_installable: bool, error_message: str
) -> None:
super().__init__(
provider=provider,
prompt_installable=prompt_installable,
error_message=error_message,
)
self.prompt_installable = prompt_installable
self.provider = provider
class _GenericProviderError(ProviderBaseError):
_FMT_ERROR_MESSAGE_AND_EXIT_CODE = (
"An error occurred with the instance when trying to {action} with "
"{provider_name!r}: returned exit code {exit_code!r}: {error_message}.\n"
"Ensure that {provider_name!r} is setup correctly and try again."
)
_FMT_ERROR_MESSAGE = (
"An error occurred with the instance when trying to {action} with "
"{provider_name!r}: {error_message}.\n"
"Ensure that {provider_name!r} is setup correctly and try again."
)
_FMT_EXIT_CODE = (
"An error occurred with the instance when trying to {action} with "
"{provider_name!r}: returned exit code {exit_code!r}.\n"
"Ensure that {provider_name!r} is setup correctly and try again."
)
def __init__(
self,
*,
provider_name: str,
action: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
if exit_code is not None and error_message is not None:
fmt = self._FMT_ERROR_MESSAGE_AND_EXIT_CODE
elif error_message:
fmt = self._FMT_ERROR_MESSAGE
elif exit_code:
fmt = self._FMT_EXIT_CODE
else:
raise RuntimeError("error_message nor exit_code are set")
self.fmt = fmt
super().__init__(
provider_name=provider_name,
action=action,
error_message=error_message,
exit_code=exit_code,
)
class ProviderCommunicationError(ProviderBaseError):
fmt = (
"An error occurred when trying to communicate with the "
"{provider_name!r} provider."
)
def __init__(self, *, provider_name: str) -> None:
super().__init__(provider_name=provider_name)
class ProviderLaunchError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="launch",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderStartError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="start",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderStopError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="stop",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderDeleteError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="delete",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderExecError(ProviderBaseError):
fmt = (
"An error occurred when trying to execute {command_string!r} with "
"{provider_name!r}: returned exit code {exit_code!r}."
)
def __init__(
self, *, provider_name: str, command: Sequence[str], exit_code: int
) -> None:
command_string = " ".join(shlex.quote(i) for i in command)
super().__init__(
provider_name=provider_name,
command=command,
command_string=command_string,
exit_code=exit_code,
)
class ProviderShellError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="shell",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderMountError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="mount",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderUnMountError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="unmount",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderFileCopyError(_GenericProviderError):
def __init__(
self,
*,
provider_name: str,
error_message: Optional[str] = None,
exit_code: Optional[int] = None
) -> None:
super().__init__(
action="copy files",
provider_name=provider_name,
error_message=error_message,
exit_code=exit_code,
)
class ProviderInfoError(ProviderBaseError):
fmt = (
"An error occurred when using {provider_name!r} to "
"query the status of the instance: returned exit code {exit_code!r}: {stderr!s}."
)
def __init__(self, *, provider_name: str, exit_code: int, stderr: bytes) -> None:
super().__init__(
provider_name=provider_name, exit_code=exit_code, stderr=stderr.decode()
)
class ProviderInstanceNotFoundError(ProviderBaseError):
fmt = "Cannot find an instance named {instance_name!r}."
def __init__(self, *, instance_name: str) -> None:
super().__init__(instance_name=instance_name)
class ProviderInfoDataKeyError(ProviderBaseError):
fmt = (
"The data returned by {provider_name!r} was not expected. "
"It is missing a required key {missing_key!r} in {data!r}."
)
def __init__(
self, *, provider_name: str, missing_key: str, data: Dict[str, Any]
) -> None:
super().__init__(
provider_name=provider_name, missing_key=missing_key, data=data
)
class ProviderBadDataError(ProviderBaseError):
fmt = (
"The data returned by {provider_name!r} was not expected "
"or in the wrong format: {data!r}."
)
def __init__(self, *, provider_name: str, data: str) -> None:
super().__init__(provider_name=provider_name, data=data)
| gpl-3.0 |
vadimtk/chrome4sdp | tools/telemetry/third_party/gsutilz/gslib/__main__.py | 12 | 26565 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for Google Cloud Storage command line tool."""
from __future__ import absolute_import
import ConfigParser
import datetime
import errno
import getopt
import logging
import os
import re
import signal
import socket
import sys
import textwrap
import traceback
# Load the gsutil version number and append it to boto.UserAgent so the value is
# set before anything instantiates boto. This has to run after THIRD_PARTY_DIR
# is modified (done in gsutil.py) but before any calls are made that would cause
# boto.s3.Connection to be loaded - otherwise the Connection class would end up
# with a static reference to the pre-modified version of the UserAgent field,
# so boto requests would not include gsutil/version# in the UserAgent string.
import boto
import gslib
# TODO: gsutil-beta: Cloud SDK scans for this string and performs
# substitution; ensure this works with both apitools and boto.
boto.UserAgent += ' gsutil/%s (%s)' % (gslib.VERSION, sys.platform)
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
boto.UserAgent += ' Cloud SDK Command Line Tool'
if os.environ.get('CLOUDSDK_VERSION'):
boto.UserAgent += ' %s' % os.environ.get('CLOUDSDK_VERSION')
# pylint: disable=g-bad-import-order
# pylint: disable=g-import-not-at-top
import httplib2
import oauth2client
from gslib import wildcard_iterator
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import ProjectIdException
from gslib.cloud_api import ServiceException
from gslib.command_runner import CommandRunner
import gslib.exception
from gslib.exception import CommandException
import apitools.base.py.exceptions as apitools_exceptions
from gslib.util import CreateLock
from gslib.util import GetBotoConfigFileList
from gslib.util import GetCertsFile
from gslib.util import GetCleanupFiles
from gslib.util import GsutilStreamHandler
from gslib.util import ProxyInfoFromEnvironmentVar
from gslib.sig_handling import GetCaughtSignals
from gslib.sig_handling import InitializeSignalHandling
from gslib.sig_handling import RegisterSignalHandler
GSUTIL_CLIENT_ID = '909320924072.apps.googleusercontent.com'
# Google OAuth2 clients always have a secret, even if the client is an installed
# application/utility such as gsutil. Of course, in such cases the "secret" is
# actually publicly known; security depends entirely on the secrecy of refresh
# tokens, which effectively become bearer tokens.
GSUTIL_CLIENT_NOTSOSECRET = 'p3RlpR10xMFh9ZXBS/ZNLYUu'
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
# Cloud SDK installs have a separate client ID / secret.
GSUTIL_CLIENT_ID = '32555940559.apps.googleusercontent.com'
GSUTIL_CLIENT_NOTSOSECRET = 'ZmssLNjJy2998hD4CTg2ejr2'
CONFIG_KEYS_TO_REDACT = ['proxy', 'proxy_port', 'proxy_user', 'proxy_pass']
# We don't use the oauth2 authentication plugin directly; importing it here
# ensures that it's loaded and available by default when an operation requiring
# authentication is performed.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
except ImportError:
pass
DEBUG_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with debug output enabled.
*** Be aware that debug output includes authentication credentials.
*** Make sure to remove the value of the Authorization header for
*** each HTTP request printed to the console prior to posting to
*** a public medium such as a forum post or Stack Overflow.
***************************** WARNING *****************************
""".lstrip()
HTTP_WARNING = """
***************************** WARNING *****************************
*** You are running gsutil with the "https_validate_certificates" config
*** variable set to False. This option should always be set to True in
*** production environments to protect against man-in-the-middle attacks,
*** and leaking of user data.
***************************** WARNING *****************************
""".lstrip()
debug = 0
test_exception_traces = False
# pylint: disable=unused-argument
def _CleanupSignalHandler(signal_num, cur_stack_frame):
"""Cleans up if process is killed with SIGINT, SIGQUIT or SIGTERM."""
_Cleanup()
def _Cleanup():
for fname in GetCleanupFiles():
try:
os.unlink(fname)
except: # pylint: disable=bare-except
pass
def _OutputAndExit(message):
"""Outputs message and exists with code 1."""
from gslib.util import UTF8 # pylint: disable=g-import-not-at-top
if debug >= 2 or test_exception_traces:
stack_trace = traceback.format_exc()
err = ('DEBUG: Exception stack trace:\n %s\n' %
re.sub('\\n', '\n ', stack_trace))
else:
err = '%s\n' % message
try:
sys.stderr.write(err.encode(UTF8))
except UnicodeDecodeError:
# Can happen when outputting invalid Unicode filenames.
sys.stderr.write(err)
sys.exit(1)
def _OutputUsageAndExit(command_runner):
command_runner.RunNamedCommand('help')
sys.exit(1)
class GsutilFormatter(logging.Formatter):
"""A logging.Formatter that supports logging microseconds (%f)."""
def formatTime(self, record, datefmt=None):
if datefmt:
return datetime.datetime.fromtimestamp(record.created).strftime(datefmt)
# Use default implementation if datefmt is not specified.
return super(GsutilFormatter, self).formatTime(record, datefmt=datefmt)
def _ConfigureLogging(level=logging.INFO):
"""Similar to logging.basicConfig() except it always adds a handler."""
log_format = '%(levelname)s %(asctime)s %(filename)s] %(message)s'
date_format = '%m%d %H:%M:%S.%f'
formatter = GsutilFormatter(fmt=log_format, datefmt=date_format)
handler = GsutilStreamHandler()
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(level)
def main():
InitializeSignalHandling()
# Any modules used in initializing multiprocessing variables must be
# imported after importing gslib.__main__.
# pylint: disable=redefined-outer-name,g-import-not-at-top
import gslib.boto_translation
import gslib.command
import gslib.util
from gslib.util import BOTO_IS_SECURE
from gslib.util import CERTIFICATE_VALIDATION_ENABLED
# pylint: disable=unused-variable
from gcs_oauth2_boto_plugin import oauth2_client
# pylint: enable=unused-variable
from gslib.util import MultiprocessingIsAvailable
if MultiprocessingIsAvailable()[0]:
# These setup methods must be called, and, on Windows, they can only be
# called from within an "if __name__ == '__main__':" block.
gslib.util.InitializeMultiprocessingVariables()
gslib.command.InitializeMultiprocessingVariables()
gslib.boto_translation.InitializeMultiprocessingVariables()
# This needs to be done after gslib.util.InitializeMultiprocessingVariables(),
# since otherwise we can't call gslib.util.CreateLock.
try:
# pylint: disable=unused-import,g-import-not-at-top
import gcs_oauth2_boto_plugin
gcs_oauth2_boto_plugin.oauth2_helper.SetFallbackClientIdAndSecret(
GSUTIL_CLIENT_ID, GSUTIL_CLIENT_NOTSOSECRET)
gcs_oauth2_boto_plugin.oauth2_helper.SetLock(CreateLock())
except ImportError:
pass
global debug
global test_exception_traces
if not (2, 6) <= sys.version_info[:3] < (3,):
raise gslib.exception.CommandException(
'gsutil requires python 2.6 or 2.7.')
# In gsutil 4.0 and beyond, we don't use the boto library for the JSON
# API. However, we still store gsutil configuration data in the .boto
# config file for compatibility with previous versions and user convenience.
# Many users have a .boto configuration file from previous versions, and it
# is useful to have all of the configuration for gsutil stored in one place.
command_runner = CommandRunner()
if not BOTO_IS_SECURE:
raise CommandException('\n'.join(textwrap.wrap(
'Your boto configuration has is_secure = False. Gsutil cannot be '
'run this way, for security reasons.')))
headers = {}
parallel_operations = False
quiet = False
version = False
debug = 0
test_exception_traces = False
# If user enters no commands just print the usage info.
if len(sys.argv) == 1:
sys.argv.append('help')
# Change the default of the 'https_validate_certificates' boto option to
# True (it is currently False in boto).
if not boto.config.has_option('Boto', 'https_validate_certificates'):
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
boto.config.setbool('Boto', 'https_validate_certificates', True)
gslib.util.certs_file_lock = CreateLock()
for signal_num in GetCaughtSignals():
RegisterSignalHandler(signal_num, _CleanupSignalHandler)
GetCertsFile()
try:
try:
opts, args = getopt.getopt(sys.argv[1:], 'dDvo:h:mq',
['debug', 'detailedDebug', 'version', 'option',
'help', 'header', 'multithreaded', 'quiet',
'testexceptiontraces'])
except getopt.GetoptError as e:
_HandleCommandException(gslib.exception.CommandException(e.msg))
for o, a in opts:
if o in ('-d', '--debug'):
# Passing debug=2 causes boto to include httplib header output.
debug = 3
elif o in ('-D', '--detailedDebug'):
# We use debug level 3 to ask gsutil code to output more detailed
# debug output. This is a bit of a hack since it overloads the same
# flag that was originally implemented for boto use. And we use -DD
# to ask for really detailed debugging (i.e., including HTTP payload).
if debug == 3:
debug = 4
else:
debug = 3
elif o in ('-?', '--help'):
_OutputUsageAndExit(command_runner)
elif o in ('-h', '--header'):
(hdr_name, _, hdr_val) = a.partition(':')
if not hdr_name:
_OutputUsageAndExit(command_runner)
headers[hdr_name.lower()] = hdr_val
elif o in ('-m', '--multithreaded'):
parallel_operations = True
elif o in ('-q', '--quiet'):
quiet = True
elif o in ('-v', '--version'):
version = True
elif o == '--testexceptiontraces': # Hidden flag for integration tests.
test_exception_traces = True
elif o in ('-o', '--option'):
(opt_section_name, _, opt_value) = a.partition('=')
if not opt_section_name:
_OutputUsageAndExit(command_runner)
(opt_section, _, opt_name) = opt_section_name.partition(':')
if not opt_section or not opt_name:
_OutputUsageAndExit(command_runner)
if not boto.config.has_section(opt_section):
boto.config.add_section(opt_section)
boto.config.set(opt_section, opt_name, opt_value)
httplib2.debuglevel = debug
if debug > 1:
sys.stderr.write(DEBUG_WARNING)
if debug >= 2:
_ConfigureLogging(level=logging.DEBUG)
command_runner.RunNamedCommand('ver', ['-l'])
config_items = []
try:
config_items.extend(boto.config.items('Boto'))
config_items.extend(boto.config.items('GSUtil'))
except ConfigParser.NoSectionError:
pass
for i in xrange(len(config_items)):
config_item_key = config_items[i][0]
if config_item_key in CONFIG_KEYS_TO_REDACT:
config_items[i] = (config_item_key, 'REDACTED')
sys.stderr.write('Command being run: %s\n' % ' '.join(sys.argv))
sys.stderr.write('config_file_list: %s\n' % GetBotoConfigFileList())
sys.stderr.write('config: %s\n' % str(config_items))
elif quiet:
_ConfigureLogging(level=logging.WARNING)
else:
_ConfigureLogging(level=logging.INFO)
# oauth2client uses info logging in places that would better
# correspond to gsutil's debug logging (e.g., when refreshing
# access tokens).
oauth2client.client.logger.setLevel(logging.WARNING)
if not CERTIFICATE_VALIDATION_ENABLED:
sys.stderr.write(HTTP_WARNING)
if version:
command_name = 'version'
elif not args:
command_name = 'help'
else:
command_name = args[0]
_CheckAndWarnForProxyDifferences()
if os.environ.get('_ARGCOMPLETE', '0') == '1':
return _PerformTabCompletion(command_runner)
return _RunNamedCommandAndHandleExceptions(
command_runner, command_name, args=args[1:], headers=headers,
debug_level=debug, parallel_operations=parallel_operations)
finally:
_Cleanup()
def _CheckAndWarnForProxyDifferences():
# If there are both boto config and environment variable config present for
# proxies, unset the environment variable and warn if it differs.
boto_port = boto.config.getint('Boto', 'proxy_port', 0)
if boto.config.get('Boto', 'proxy', None) or boto_port:
for proxy_env_var in ['http_proxy', 'https_proxy', 'HTTPS_PROXY']:
if proxy_env_var in os.environ and os.environ[proxy_env_var]:
differing_values = []
proxy_info = ProxyInfoFromEnvironmentVar(proxy_env_var)
if proxy_info.proxy_host != boto.config.get('Boto', 'proxy', None):
differing_values.append(
'Boto proxy host: "%s" differs from %s proxy host: "%s"' %
(boto.config.get('Boto', 'proxy', None), proxy_env_var,
proxy_info.proxy_host))
if (proxy_info.proxy_user !=
boto.config.get('Boto', 'proxy_user', None)):
differing_values.append(
'Boto proxy user: "%s" differs from %s proxy user: "%s"' %
(boto.config.get('Boto', 'proxy_user', None), proxy_env_var,
proxy_info.proxy_user))
if (proxy_info.proxy_pass !=
boto.config.get('Boto', 'proxy_pass', None)):
differing_values.append(
'Boto proxy password differs from %s proxy password' %
proxy_env_var)
# Only compare ports if at least one is present, since the
# boto logic for selecting default ports has not yet executed.
if ((proxy_info.proxy_port or boto_port) and
proxy_info.proxy_port != boto_port):
differing_values.append(
'Boto proxy port: "%s" differs from %s proxy port: "%s"' %
(boto_port, proxy_env_var, proxy_info.proxy_port))
if differing_values:
sys.stderr.write('\n'.join(textwrap.wrap(
'WARNING: Proxy configuration is present in both the %s '
'environment variable and boto configuration, but '
'configuration differs. boto configuration proxy values will '
'be used. Differences detected:' % proxy_env_var)))
sys.stderr.write('\n%s\n' % '\n'.join(differing_values))
# Regardless of whether the proxy configuration values matched,
# delete the environment variable so as not to confuse boto.
del os.environ[proxy_env_var]
def _HandleUnknownFailure(e):
# Called if we fall through all known/handled exceptions. Allows us to
# print a stacktrace if -D option used.
if debug >= 2:
stack_trace = traceback.format_exc()
sys.stderr.write('DEBUG: Exception stack trace:\n %s\n' %
re.sub('\\n', '\n ', stack_trace))
else:
_OutputAndExit('Failure: %s.' % e)
def _HandleCommandException(e):
if e.informational:
_OutputAndExit(e.reason)
else:
_OutputAndExit('CommandException: %s' % e.reason)
# pylint: disable=unused-argument
def _HandleControlC(signal_num, cur_stack_frame):
"""Called when user hits ^C.
This function prints a brief message instead of the normal Python stack trace
(unless -D option is used).
Args:
signal_num: Signal that was caught.
cur_stack_frame: Unused.
"""
if debug >= 2:
stack_trace = ''.join(traceback.format_list(traceback.extract_stack()))
_OutputAndExit(
'DEBUG: Caught signal %d - Exception stack trace:\n'
' %s' % (signal_num, re.sub('\\n', '\n ', stack_trace)))
else:
_OutputAndExit('Caught signal %d - exiting' % signal_num)
def _HandleSigQuit(signal_num, cur_stack_frame):
"""Called when user hits ^\\, so we can force breakpoint a running gsutil."""
import pdb # pylint: disable=g-import-not-at-top
pdb.set_trace()
def _ConstructAccountProblemHelp(reason):
"""Constructs a help string for an access control error.
Args:
reason: e.reason string from caught exception.
Returns:
    Constructed help text.
"""
default_project_id = boto.config.get_value('GSUtil', 'default_project_id')
# pylint: disable=line-too-long, g-inconsistent-quotes
acct_help = (
"Your request resulted in an AccountProblem (403) error. Usually this "
"happens if you attempt to create a bucket without first having "
"enabled billing for the project you are using. Please ensure billing is "
"enabled for your project by following the instructions at "
"`Google Developers Console<https://developers.google.com/console/help/billing>`. ")
if default_project_id:
acct_help += (
"In the project overview, ensure that the Project Number listed for "
"your project matches the project ID (%s) from your boto config file. "
% default_project_id)
acct_help += (
"If the above doesn't resolve your AccountProblem, please send mail to "
"[email protected] requesting assistance, noting the exact command you "
"ran, the fact that you received a 403 AccountProblem error, and your "
"project ID. Please do not post your project ID on StackOverflow. "
"Note: It's possible to use Google Cloud Storage without enabling "
"billing if you're only listing or reading objects for which you're "
"authorized, or if you're uploading objects to a bucket billed to a "
"project that has billing enabled. But if you're attempting to create "
"buckets or upload objects to a bucket owned by your own project, you "
"must first enable billing for that project.")
return acct_help
def _CheckAndHandleCredentialException(e, args):
# Provide detail to users who have no boto config file (who might previously
# have been using gsutil only for accessing publicly readable buckets and
# objects).
# pylint: disable=g-import-not-at-top
from gslib.util import HasConfiguredCredentials
if (not HasConfiguredCredentials() and
not boto.config.get_value('Tests', 'bypass_anonymous_access_warning',
False)):
# The check above allows tests to assert that we get a particular,
# expected failure, rather than always encountering this error message
# when there are no configured credentials. This allows tests to
# simulate a second user without permissions, without actually requiring
# two separate configured users.
if os.environ.get('CLOUDSDK_WRAPPER') == '1':
_OutputAndExit('\n'.join(textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gcloud auth login" command to '
'configure gsutil to use these credentials.')))
else:
_OutputAndExit('\n'.join(textwrap.wrap(
'You are attempting to access protected data with no configured '
'credentials. Please visit '
'https://cloud.google.com/console#/project and sign up for an '
'account, and then run the "gsutil config" command to configure '
'gsutil to use these credentials.')))
elif (e.reason and
(e.reason == 'AccountProblem' or e.reason == 'Account disabled.' or
'account for the specified project has been disabled' in e.reason)
and ','.join(args).find('gs://') != -1):
_OutputAndExit('\n'.join(textwrap.wrap(
_ConstructAccountProblemHelp(e.reason))))
def _RunNamedCommandAndHandleExceptions(command_runner, command_name, args=None,
headers=None, debug_level=0,
parallel_operations=False):
"""Runs the command with the given command runner and arguments."""
# pylint: disable=g-import-not-at-top
from gslib.util import GetConfigFilePath
from gslib.util import IS_WINDOWS
from gslib.util import IsRunningInteractively
try:
# Catch ^C so we can print a brief message instead of the normal Python
# stack trace. Register as a final signal handler because this handler kills
# the main gsutil process (so it must run last).
RegisterSignalHandler(signal.SIGINT, _HandleControlC, is_final_handler=True)
# Catch ^\ so we can force a breakpoint in a running gsutil.
if not IS_WINDOWS:
RegisterSignalHandler(signal.SIGQUIT, _HandleSigQuit)
return command_runner.RunNamedCommand(command_name, args, headers,
debug_level, parallel_operations)
except AttributeError as e:
if str(e).find('secret_access_key') != -1:
_OutputAndExit('Missing credentials for the given URI(s). Does your '
'boto config file contain all needed credentials?')
else:
_OutputAndExit(str(e))
except gslib.exception.CommandException as e:
_HandleCommandException(e)
except getopt.GetoptError as e:
_HandleCommandException(gslib.exception.CommandException(e.msg))
except boto.exception.InvalidUriError as e:
_OutputAndExit('InvalidUriError: %s.' % e.message)
except gslib.exception.InvalidUrlError as e:
_OutputAndExit('InvalidUrlError: %s.' % e.message)
except boto.auth_handler.NotReadyToAuthenticate:
_OutputAndExit('NotReadyToAuthenticate')
except OSError as e:
_OutputAndExit('OSError: %s.' % e.strerror)
except IOError as e:
if (e.errno == errno.EPIPE or (IS_WINDOWS and e.errno == errno.EINVAL)
and not IsRunningInteractively()):
# If we get a pipe error, this just means that the pipe to stdout or
# stderr is broken. This can happen if the user pipes gsutil to a command
# that doesn't use the entire output stream. Instead of raising an error,
# just swallow it up and exit cleanly.
sys.exit(0)
else:
raise
except wildcard_iterator.WildcardException as e:
_OutputAndExit(e.reason)
except ProjectIdException as e:
_OutputAndExit(
'You are attempting to perform an operation that requires a '
'project id, with none configured. Please re-run '
'gsutil config and make sure to follow the instructions for '
'finding and entering your default project id.')
except BadRequestException as e:
if e.reason == 'MissingSecurityHeader':
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(e)
except AccessDeniedException as e:
_CheckAndHandleCredentialException(e, args)
_OutputAndExit(e)
except ArgumentException as e:
_OutputAndExit(e)
except ServiceException as e:
_OutputAndExit(e)
except apitools_exceptions.HttpError as e:
# These should usually be retried by the underlying implementation or
# wrapped by CloudApi ServiceExceptions, but if we do get them,
# print something useful.
_OutputAndExit('HttpError: %s, %s' % (getattr(e.response, 'status', ''),
e.content or ''))
except socket.error as e:
if e.args[0] == errno.EPIPE:
# Retrying with a smaller file (per suggestion below) works because
# the library code send loop (in boto/s3/key.py) can get through the
# entire file and then request the HTTP response before the socket
# gets closed and the response lost.
_OutputAndExit(
'Got a "Broken pipe" error. This can happen to clients using Python '
'2.x, when the server sends an error response and then closes the '
'socket (see http://bugs.python.org/issue5542). If you are trying to '
'upload a large object you might retry with a small (say 200k) '
'object, and see if you get a more specific error code.'
)
else:
_HandleUnknownFailure(e)
except Exception as e:
# Check for two types of errors related to service accounts. These errors
# appear to be the same except for their messages, but they are caused by
# different problems and both have unhelpful error messages. Moreover,
# the error type belongs to PyOpenSSL, which is not necessarily installed.
if 'mac verify failure' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file_password field in your config file,'
'\n%s, is correct.' % GetConfigFilePath())
elif 'asn1 encoding routines' in str(e):
_OutputAndExit(
'Encountered an error while refreshing access token. '
'If you are using a service account,\nplease verify that the '
'gs_service_key_file field in your config file,\n%s, is correct.'
% GetConfigFilePath())
_HandleUnknownFailure(e)
def _PerformTabCompletion(command_runner):
"""Performs gsutil-specific tab completion for the shell."""
# argparse and argcomplete are bundled with the Google Cloud SDK.
# When gsutil is invoked from the Google Cloud SDK, both should be available.
try:
import argcomplete
import argparse
except ImportError as e:
_OutputAndExit('A library required for performing tab completion was'
' not found.\nCause: %s' % e)
parser = argparse.ArgumentParser(add_help=False)
subparsers = parser.add_subparsers()
command_runner.ConfigureCommandArgumentParsers(subparsers)
argcomplete.autocomplete(parser, exit_method=sys.exit)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
defionscode/ansible | lib/ansible/modules/cloud/openstack/os_floating_ip.py | 26 | 9785 | #!/usr/bin/python
# Copyright: (c) 2015, Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_floating_ip
version_added: "2.0"
author: Davide Guerri (@dguerri) <[email protected]>
short_description: Add/Remove floating IP from an instance
extends_documentation_fragment: openstack
description:
- Add or Remove a floating IP to an instance
options:
server:
description:
- The name or ID of the instance to which the IP address
should be assigned.
required: true
network:
description:
- The name or ID of a neutron external network or a nova pool name.
floating_ip_address:
description:
- A floating IP address to attach or to detach. Required only if I(state)
is absent. When I(state) is present can be used to specify a IP address
to attach.
reuse:
description:
- When I(state) is present, and I(floating_ip_address) is not present,
this parameter can be used to specify whether we should try to reuse
a floating IP address already allocated to the project.
type: bool
default: 'no'
fixed_address:
description:
- To which fixed IP of server the floating IP address should be
attached to.
nat_destination:
description:
- The name or id of a neutron private network that the fixed IP to
attach floating IP is on
aliases: ["fixed_network", "internal_network"]
version_added: "2.3"
wait:
description:
- When attaching a floating IP address, specify whether we should
wait for it to appear as attached.
type: bool
default: 'no'
timeout:
description:
- Time to wait for an IP address to appear as attached. See wait.
required: false
default: 60
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
purge:
description:
- When I(state) is absent, indicates whether or not to delete the floating
IP completely, or only detach it from the server. Default is to detach only.
type: bool
default: 'no'
version_added: "2.1"
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements: ["openstacksdk"]
'''
EXAMPLES = '''
# Assign a floating IP to the first interface of `cattle001` from an existing
# external network or nova pool. A new floating IP from the first available
# external network is allocated to the project.
- os_floating_ip:
cloud: dguerri
server: cattle001
# Assign a new floating IP to the instance fixed ip `192.0.2.3` of
# `cattle001`. If a free floating IP is already allocated to the project, it is
# reused; if not, a new one is created.
- os_floating_ip:
cloud: dguerri
state: present
reuse: yes
server: cattle001
network: ext_net
fixed_address: 192.0.2.3
wait: true
timeout: 180
# Assign a new floating IP from the network `ext_net` to the instance fixed
# ip in network `private_net` of `cattle001`.
- os_floating_ip:
cloud: dguerri
state: present
server: cattle001
network: ext_net
nat_destination: private_net
wait: true
timeout: 180
# Detach a floating IP address from a server
- os_floating_ip:
cloud: dguerri
state: absent
floating_ip_address: 203.0.113.2
server: cattle001
'''
from ansible.module_utils.basic import AnsibleModule, remove_values
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _get_floating_ip(cloud, floating_ip_address):
f_ips = cloud.search_floating_ips(
filters={'floating_ip_address': floating_ip_address})
if not f_ips:
return None
return f_ips[0]
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
network=dict(required=False, default=None),
floating_ip_address=dict(required=False, default=None),
reuse=dict(required=False, type='bool', default=False),
fixed_address=dict(required=False, default=None),
nat_destination=dict(required=False, default=None,
aliases=['fixed_network', 'internal_network']),
wait=dict(required=False, type='bool', default=False),
timeout=dict(required=False, type='int', default=60),
purge=dict(required=False, type='bool', default=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
server_name_or_id = module.params['server']
state = module.params['state']
network = module.params['network']
floating_ip_address = module.params['floating_ip_address']
reuse = module.params['reuse']
fixed_address = module.params['fixed_address']
nat_destination = module.params['nat_destination']
wait = module.params['wait']
timeout = module.params['timeout']
purge = module.params['purge']
sdk, cloud = openstack_cloud_from_module(module)
try:
server = cloud.get_server(server_name_or_id)
if server is None:
module.fail_json(
msg="server {0} not found".format(server_name_or_id))
if state == 'present':
# If f_ip already assigned to server, check that it matches
# requirements.
public_ip = cloud.get_server_public_ip(server)
f_ip = _get_floating_ip(cloud, public_ip) if public_ip else public_ip
if f_ip:
if network:
network_id = cloud.get_network(name_or_id=network)["id"]
else:
network_id = None
                # check if we have a floating ip on the given nat_destination network
if nat_destination:
nat_floating_addrs = [addr for addr in server.addresses.get(
cloud.get_network(nat_destination)['name'], [])
if addr.addr == public_ip and
addr['OS-EXT-IPS:type'] == 'floating']
if len(nat_floating_addrs) == 0:
module.fail_json(msg="server {server} already has a "
"floating-ip on a different "
"nat-destination than '{nat_destination}'"
.format(server=server_name_or_id,
nat_destination=nat_destination))
if all([fixed_address, f_ip.fixed_ip_address == fixed_address,
network, f_ip.network != network_id]):
# Current state definitely conflicts with requirements
module.fail_json(msg="server {server} already has a "
"floating-ip on requested "
"interface but it doesn't match "
"requested network {network}: {fip}"
.format(server=server_name_or_id,
network=network,
fip=remove_values(f_ip,
module.no_log_values)))
if not network or f_ip.network == network_id:
# Requirements are met
module.exit_json(changed=False, floating_ip=f_ip)
# Requirements are vague enough to ignore existing f_ip and try
# to create a new f_ip to the server.
server = cloud.add_ips_to_server(
server=server, ips=floating_ip_address, ip_pool=network,
reuse=reuse, fixed_address=fixed_address, wait=wait,
timeout=timeout, nat_destination=nat_destination)
fip_address = cloud.get_server_public_ip(server)
# Update the floating IP status
f_ip = _get_floating_ip(cloud, fip_address)
module.exit_json(changed=True, floating_ip=f_ip)
elif state == 'absent':
if floating_ip_address is None:
if not server_name_or_id:
module.fail_json(msg="either server or floating_ip_address are required")
server = cloud.get_server(server_name_or_id)
floating_ip_address = cloud.get_server_public_ip(server)
f_ip = _get_floating_ip(cloud, floating_ip_address)
if not f_ip:
# Nothing to detach
module.exit_json(changed=False)
changed = False
if f_ip["fixed_ip_address"]:
cloud.detach_ip_from_server(
server_id=server['id'], floating_ip_id=f_ip['id'])
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
changed = True
if purge:
cloud.delete_floating_ip(f_ip['id'])
module.exit_json(changed=True)
module.exit_json(changed=changed, floating_ip=f_ip)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
if __name__ == '__main__':
main()
| gpl-3.0 |
jcarreiro/jmc-python | imp/dice.py | 1 | 1405 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import string
import random
# Simple recursive descent parser for dice rolls, e.g. '3d6+1d8+4'.
#
# roll := die {('+' | '-') die} [('+' | '-') modifier]
# die := number 'd' number
# modifier := number
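#
# For example, '3d6+1d8+4' reads as three six-sided dice, plus one
# eight-sided die, plus a flat modifier of 4; '2d10-1' would be two
# ten-sided dice minus 1. (Illustrative reading of the grammar above.)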
class StringBuf(object):
def __init__(self, s):
self.s = s
self.pos = 0
def peek(self):
return self.s[self.pos]
def getc(self):
c = self.peek()
self.pos += 1
return c
def ungetc(self):
self.pos -= 1
def tell(self):
return self.pos
class Symbol(object):
NUMBER = 0
D = 1
PLUS = 2
MINUS = 3
    def __init__(self, type_, pos, value):
        self.type_ = type_
        self.pos = pos
        self.value = value
def next_symbol(s):
c = s.getc()
while c in string.whitespace:
c = s.getc()
if c in string.digits:
# start of a number
literal = c
c = s.getc()
while c in string.digits:
literal += c
c = s.getc()
s.ungetc()
        sym = Symbol(Symbol.NUMBER, s.tell() - len(literal), int(literal))
    elif c == 'd':
        # die indicator
        sym = Symbol(Symbol.D, s.tell() - 1, c)
    elif c == '+':
        # plus sign
        sym = Symbol(Symbol.PLUS, s.tell() - 1, c)
    elif c == '-':
        # minus sign
        sym = Symbol(Symbol.MINUS, s.tell() - 1, c)
    else:
        # unrecognized input
        raise ValueError('Syntax error at position %d' % s.tell())
    return sym
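# ---------------------------------------------------------------------------
# Illustrative sketch only -- not part of the original module. One possible
# way the tokenizer above could drive the roll grammar; the function name
# `roll`, the sentinel-space trick and the evaluation strategy are all
# assumptions, not the author's implementation.
# ---------------------------------------------------------------------------
def roll(text):
    """Evaluate a dice expression such as '3d6+1d8+4' and return the total."""
    text = text.strip()
    # Trailing sentinel blank lets next_symbol() look one character past the
    # final digit without running off the end of the buffer.
    s = StringBuf(text + ' ')
    total = 0
    sign = 1
    pending = None  # most recent NUMBER not yet consumed by a 'd'
    while s.tell() < len(text):
        sym = next_symbol(s)
        if sym.type_ == Symbol.NUMBER:
            pending = sym.value
        elif sym.type_ == Symbol.D:
            sides = next_symbol(s).value  # die size follows the 'd'
            total += sign * sum(random.randint(1, sides)
                                for _ in range(pending))
            pending = None
        elif sym.type_ in (Symbol.PLUS, Symbol.MINUS):
            if pending is not None:  # previous number was a flat modifier
                total += sign * pending
                pending = None
            sign = 1 if sym.type_ == Symbol.PLUS else -1
    if pending is not None:  # trailing flat modifier
        total += sign * pending
    return total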
| mit |
nacc/autotest | mirror/source.py | 6 | 7099 | # Copyright 2009 Google Inc. Released under the GPL v2
import os, re, time, urllib2, urlparse, HTMLParser
from autotest.mirror import database
from autotest.client.shared import utils
class source(object):
"""
Abstract Base Class for the source classes.
"""
def __init__(self, database):
self.database = database
def _get_new_files(self, files):
"""
Return a copy of "files" after filtering out known old files
from "files".
"""
old_files = self.database.get_dictionary()
return dict(filter(lambda x: x[0] not in old_files, files.iteritems()))
def get_new_files(self):
raise NotImplementedError('get_new_files not implemented')
def store_files(self, files):
self.database.merge_dictionary(files)
class rsync_source(source):
_cmd_template = '/usr/bin/rsync -rltz --no-motd %s %s/%s'
def __init__(self, database, prefix, excludes = []):
super(rsync_source, self).__init__(database)
self.prefix = prefix
self.exclude = ' '.join(['--exclude "' + x + '"' for x in excludes])
self.sources = []
def _parse_output(self, output, prefix):
"""
Parse rsync's "ls -l" style output and return a dictionary of
database.item indexed by the "name" field.
"""
regex = re.compile(
'-[rwx-]{9} +(\d+) (\d{4}/\d\d/\d\d \d\d:\d\d:\d\d) (.*)')
res = {}
for line in output.splitlines():
match = regex.match(line)
if match:
groups = match.groups()
timestamp = time.mktime(time.strptime(groups[1],
'%Y/%m/%d %H:%M:%S'))
if prefix:
fname = '%s/%s' % (prefix, groups[2])
else:
fname = groups[2]
item = database.item(fname, int(groups[0]), int(timestamp))
res[item.name] = item
return res
def add_path(self, src, prefix=''):
"""
Add paths to synchronize from the source.
"""
self.sources.append((src, prefix))
def get_new_files(self):
"""
Implement source.get_new_files by using rsync listing feature.
"""
files = {}
for src, prefix in self.sources:
output = utils.system_output(self._cmd_template %
(self.exclude, self.prefix, src))
files.update(self._parse_output(output, prefix))
return self._get_new_files(files)
class _ahref_parser(HTMLParser.HTMLParser):
def reset(self, url=None, pattern=None):
HTMLParser.HTMLParser.reset(self)
self.url = url
self.pattern = pattern
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for name, value in attrs:
if name == 'href':
# compose absolute URL if relative "href" found
url = urlparse.urljoin(self.url, value)
if self.pattern.match(url):
self.links.append(url)
def get_ahref_list(self, url, pattern):
self.reset(url, pattern)
self.feed(urllib2.urlopen(url).read())
self.close()
return self.links
class url_source(source):
"""
A simple URL based source that parses HTML to find references to
kernel files.
"""
_extension_pattern = re.compile(r'.*\.[^/.]+$')
def __init__(self, database, prefix):
super(url_source, self).__init__(database)
self.prefix = prefix
self.urls = []
def add_url(self, url, pattern):
"""
Add a URL path to a HTML document with links to kernel files.
@param url: URL path to a HTML file with links to kernel files
(can be either an absolute URL or one relative to self.prefix)
        @param pattern: regex pattern to filter kernel file links out of
        all other links found in the HTML document
"""
# if it does not have an extension then it's a directory and it needs
# a trailing '/'. NOTE: there are some false positives such as
# directories named "v2.6" where ".6" will be assumed to be extension.
# In order for these to work the caller must provide a trailing /
if url[-1:] != '/' and not self._extension_pattern.match(url):
url = url + '/'
self.urls.append((url, re.compile(pattern)))
@staticmethod
def _get_item(url):
"""
Get a database.item object by fetching relevant HTTP information
from the document pointed to by the given url.
"""
try:
info = urllib2.urlopen(url).info()
except IOError, err:
# file is referenced but does not exist
print 'WARNING: %s' % err
return None
size = info.get('content-length')
if size:
size = int(size)
else:
size = -1
timestamp = int(time.mktime(info.getdate('date')))
if not timestamp:
timestamp = 0
return database.item(url, size, timestamp)
def get_new_files(self):
parser = _ahref_parser()
files = {}
for url, pattern in self.urls:
links = parser.get_ahref_list(urlparse.urljoin(self.prefix, url),
pattern)
for link in links:
item = self._get_item(link)
if item:
files[item.name] = item
return self._get_new_files(files)
class directory_source(source):
"""
Source that finds kernel files by listing the contents of a directory.
"""
def __init__(self, database, path):
"""
Initialize a directory_source instance.
@param database: Persistent database with known kernels information.
@param path: Path to the directory with the kernel files found by
this source.
"""
super(directory_source, self).__init__(database)
self._path = path
def get_new_files(self, _stat_func=os.stat):
"""
Main function, see source.get_new_files().
@param _stat_func: Used for unit testing, if we stub os.stat in the
unit test then unit test failures get reported confusingly
because the unit test framework tries to stat() the unit test
file.
"""
all_files = {}
for filename in os.listdir(self._path):
full_filename = os.path.join(self._path, filename)
try:
stat_data = _stat_func(full_filename)
except OSError:
# File might have been removed/renamed since we listed the
# directory so skip it.
continue
item = database.item(full_filename, stat_data.st_size,
int(stat_data.st_mtime))
all_files[filename] = item
return self._get_new_files(all_files)
| gpl-2.0 |
orlenko/bccf | src/mezzanine/twitter/templatetags/twitter_tags.py | 3 | 2015 |
from collections import defaultdict
from mezzanine.conf import settings
from mezzanine.twitter import (QUERY_TYPE_USER, QUERY_TYPE_LIST,
QUERY_TYPE_SEARCH)
from mezzanine.twitter.models import Tweet, TwitterQueryException
from mezzanine import template
register = template.Library()
def tweets_for(query_type, args, per_user=None):
"""
Retrieve tweets for a user, list or search term. The optional
``per_user`` arg limits the number of tweets per user, for
example to allow a fair spread of tweets per user for a list.
"""
lookup = {"query_type": query_type, "value": args[0].strip("\"'")}
try:
tweets = Tweet.objects.get_for(**lookup)
except TwitterQueryException:
return []
if per_user is not None:
_tweets = defaultdict(list)
for tweet in tweets:
if len(_tweets[tweet.user_name]) < per_user:
_tweets[tweet.user_name].append(tweet)
tweets = sum(_tweets.values(), [])
tweets.sort(key=lambda t: t.created_at, reverse=True)
if len(args) > 1 and str(args[-1]).isdigit():
tweets = tweets[:int(args[-1])]
return tweets
@register.as_tag
def tweets_for_user(*args):
"""
Tweets for a user.
"""
return tweets_for(QUERY_TYPE_USER, args)
@register.as_tag
def tweets_for_list(*args):
"""
Tweets for a user's list.
"""
return tweets_for(QUERY_TYPE_LIST, args, per_user=1)
@register.as_tag
def tweets_for_search(*args):
"""
Tweets for a search query.
"""
return tweets_for(QUERY_TYPE_SEARCH, args)
@register.as_tag
def tweets_default(*args):
"""
Tweets for the default settings.
"""
settings.use_editable()
query_type = settings.TWITTER_DEFAULT_QUERY_TYPE
args = (settings.TWITTER_DEFAULT_QUERY,
settings.TWITTER_DEFAULT_NUM_TWEETS)
per_user = None
if query_type == QUERY_TYPE_LIST:
per_user = 1
return tweets_for(query_type, args, per_user=per_user)
| unlicense |
drawks/ansible | lib/ansible/modules/cloud/google/gcp_compute_ssl_policy.py | 12 | 13526 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_ssl_policy
description:
- Represents a SSL policy. SSL policies give you the ability to control the features
of SSL that your SSL proxy or HTTPS load balancer negotiates.
short_description: Creates a GCP SslPolicy
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
description:
description:
- An optional description of this resource.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
profile:
description:
- Profile specifies the set of SSL features that can be used by the load balancer
when negotiating SSL with clients. This can be one of `COMPATIBLE`, `MODERN`,
`RESTRICTED`, or `CUSTOM`. If using `CUSTOM`, the set of SSL features to enable
must be specified in the `customFeatures` field.
required: false
choices:
- COMPATIBLE
- MODERN
- RESTRICTED
- CUSTOM
min_tls_version:
description:
- The minimum version of SSL protocol that can be used by the clients to establish
a connection with the load balancer. This can be one of `TLS_1_0`, `TLS_1_1`,
`TLS_1_2`.
required: false
choices:
- TLS_1_0
- TLS_1_1
- TLS_1_2
custom_features:
description:
- A list of features enabled when the selected profile is CUSTOM. The method returns
the set of features that can be specified in this list. This field must be empty
if the profile is not CUSTOM.
required: false
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/sslPolicies)'
- 'Using SSL Policies: U(https://cloud.google.com/compute/docs/load-balancing/ssl-policies)'
'''
EXAMPLES = '''
- name: create a ssl policy
gcp_compute_ssl_policy:
name: test_object
profile: CUSTOM
min_tls_version: TLS_1_2
custom_features:
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
profile:
description:
- Profile specifies the set of SSL features that can be used by the load balancer
when negotiating SSL with clients. This can be one of `COMPATIBLE`, `MODERN`,
`RESTRICTED`, or `CUSTOM`. If using `CUSTOM`, the set of SSL features to enable
must be specified in the `customFeatures` field.
returned: success
type: str
minTlsVersion:
description:
- The minimum version of SSL protocol that can be used by the clients to establish
a connection with the load balancer. This can be one of `TLS_1_0`, `TLS_1_1`,
`TLS_1_2`.
returned: success
type: str
enabledFeatures:
description:
- The list of features enabled in the SSL policy.
returned: success
type: list
customFeatures:
description:
- A list of features enabled when the selected profile is CUSTOM. The method returns
the set of features that can be specified in this list. This field must be empty
if the profile is not CUSTOM.
returned: success
type: list
fingerprint:
description:
- Fingerprint of this resource. A hash of the contents stored in this object. This
field is used in optimistic locking.
returned: success
type: str
warnings:
description:
- If potential misconfigurations are detected for this SSL policy, this field will
be populated with warning messages.
returned: success
type: complex
contains:
code:
description:
- A warning code, if applicable.
returned: success
type: str
message:
description:
- A human-readable description of the warning code.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
name=dict(required=True, type='str'),
profile=dict(type='str', choices=['COMPATIBLE', 'MODERN', 'RESTRICTED', 'CUSTOM']),
min_tls_version=dict(type='str', choices=['TLS_1_0', 'TLS_1_1', 'TLS_1_2']),
custom_features=dict(type='list', elements='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#sslPolicy'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.patch(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#sslPolicy',
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'profile': module.params.get('profile'),
u'minTlsVersion': module.params.get('min_tls_version'),
u'customFeatures': module.params.get('custom_features'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
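# For the playbook shown in EXAMPLES above, resource_to_request() would build
# roughly the following request body (illustrative, derived from the field
# mapping just above):
#   {'kind': 'compute#sslPolicy', 'name': 'test_object', 'profile': 'CUSTOM',
#    'minTlsVersion': 'TLS_1_2',
#    'customFeatures': ['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384',
#                       'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384']}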
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/sslPolicies/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/sslPolicies".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
    # Remove all output-only properties from the response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': module.params.get('description'),
u'id': response.get(u'id'),
u'name': module.params.get('name'),
u'profile': response.get(u'profile'),
u'minTlsVersion': response.get(u'minTlsVersion'),
u'enabledFeatures': response.get(u'enabledFeatures'),
u'customFeatures': response.get(u'customFeatures'),
u'fingerprint': response.get(u'fingerprint'),
u'warnings': SslPolicyWarningsArray(response.get(u'warnings', []), module).from_response(),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#sslPolicy')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class SslPolicyWarningsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({})
def _response_from_item(self, item):
return remove_nones_from_dict({})
if __name__ == '__main__':
main()
| gpl-3.0 |
disqus/django-old | tests/regressiontests/string_lookup/models.py | 92 | 1199 | # -*- coding: utf-8 -*-
from django.db import models
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __unicode__(self):
return "Foo %s" % self.name
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, related_name='normal_foo')
fwd = models.ForeignKey("Whiz")
back = models.ForeignKey("Foo")
def __unicode__(self):
return "Bar %s" % self.place.name
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return "Whiz %s" % self.name
class Child(models.Model):
parent = models.OneToOneField('Base')
name = models.CharField(max_length=50)
def __unicode__(self):
return "Child %s" % self.name
class Base(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return "Base %s" % self.name
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.IPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
| bsd-3-clause |
wdv4758h/ZipPy | lib-python/3/xml/dom/expatbuilder.py | 51 | 36364 | """Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The later is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
from xml.dom.minicompat import *
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
_typeinfo_map = {
"CDATA": minidom.TypeInfo(None, "cdata"),
"ENUM": minidom.TypeInfo(None, "enumeration"),
"ENTITY": minidom.TypeInfo(None, "entity"),
"ENTITIES": minidom.TypeInfo(None, "entities"),
"ID": minidom.TypeInfo(None, "id"),
"IDREF": minidom.TypeInfo(None, "idref"),
"IDREFS": minidom.TypeInfo(None, "idrefs"),
"NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
"NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
self.tagName = tagName
self._attr_info = []
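        # Each entry appended by attlist_decl_handler() has the layout
        # [None, attr_name, None, None, default, 0, attr_type, required],
        # so entry[1] is the attribute name and entry[-2] its declared type.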
self._model = model
def __getstate__(self):
return self._attr_info, self._model, self.tagName
def __setstate__(self, state):
self._attr_info, self._model, self.tagName = state
def getAttributeType(self, aname):
for info in self._attr_info:
if info[1] == aname:
t = info[-2]
if t[0] == "(":
return _typeinfo_map["ENUM"]
else:
return _typeinfo_map[info[-2]]
return minidom._no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return minidom._no_type
def isElementContent(self):
if self._model:
type = self._model[0]
return type not in (expat.model.XML_CTYPE_ANY,
expat.model.XML_CTYPE_MIXED)
else:
return False
def isEmpty(self):
if self._model:
return self._model[0] == expat.model.XML_CTYPE_EMPTY
else:
return False
def isId(self, aname):
for info in self._attr_info:
if info[1] == aname:
return info[-2] == "ID"
return False
def isIdNS(self, euri, ename, auri, aname):
# not sure this is meaningful
return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
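# Expat reports namespace-qualified names as space-separated strings of the
# form "uri localname" or "uri localname prefix"; for example an element like
# <x:div xmlns:x="http://example.com/ns"> arrives here roughly as
# "http://example.com/ns div x" (the URI is an illustrative value only) and is
# split back into (namespaceURI, localName, prefix, qualifiedName) below.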
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
"""Document builder that uses Expat to build a ParsedXML.DOM document
instance."""
def __init__(self, options=None):
if options is None:
options = xmlbuilder.Options()
self._options = options
if self._options.filter is not None:
self._filter = FilterVisibilityController(self._options.filter)
else:
self._filter = None
# This *really* doesn't do anything in this case, so
# override it with something fast & minimal.
self._finish_start_element = id
self._parser = None
self.reset()
def createParser(self):
"""Create a new parser object."""
return expat.ParserCreate()
def getParser(self):
"""Return the parser object, creating a new one if needed."""
if not self._parser:
self._parser = self.createParser()
self._intern_setdefault = self._parser.intern.setdefault
self._parser.buffer_text = True
self._parser.ordered_attributes = True
self._parser.specified_attributes = True
self.install(self._parser)
return self._parser
def reset(self):
"""Free all data structures used during DOM construction."""
self.document = theDOMImplementation.createDocument(
EMPTY_NAMESPACE, None, None)
self.curNode = self.document
self._elem_info = self.document._elem_info
self._cdata = False
def install(self, parser):
"""Install the callbacks needed to build the DOM into the parser."""
# This creates circular references!
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.first_element_handler
parser.EndElementHandler = self.end_element_handler
parser.ProcessingInstructionHandler = self.pi_handler
if self._options.entities:
parser.EntityDeclHandler = self.entity_decl_handler
parser.NotationDeclHandler = self.notation_decl_handler
if self._options.comments:
parser.CommentHandler = self.comment_handler
if self._options.cdata_sections:
parser.StartCdataSectionHandler = self.start_cdata_section_handler
parser.EndCdataSectionHandler = self.end_cdata_section_handler
parser.CharacterDataHandler = self.character_data_handler_cdata
else:
parser.CharacterDataHandler = self.character_data_handler
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
parser.XmlDeclHandler = self.xml_decl_handler
parser.ElementDeclHandler = self.element_decl_handler
parser.AttlistDeclHandler = self.attlist_decl_handler
def parseFile(self, file):
"""Parse a document from a file object, returning the document
node."""
parser = self.getParser()
first_buffer = True
try:
while 1:
buffer = file.read(16*1024)
if not buffer:
break
parser.Parse(buffer, 0)
if first_buffer and self.document.documentElement:
self._setup_subset(buffer)
first_buffer = False
parser.Parse("", True)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def parseString(self, string):
"""Parse a document from a string, returning the document node."""
parser = self.getParser()
try:
parser.Parse(string, True)
self._setup_subset(string)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def _setup_subset(self, buffer):
"""Load the internal subset if there might be one."""
if self.document.doctype:
extractor = InternalSubsetExtractor()
extractor.parseString(buffer)
subset = extractor.getSubset()
self.document.doctype.internalSubset = subset
def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
has_internal_subset):
doctype = self.document.implementation.createDocumentType(
doctypeName, publicId, systemId)
doctype.ownerDocument = self.document
_append_child(self.document, doctype)
self.document.doctype = doctype
if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
self.document.doctype = None
del self.document.childNodes[-1]
doctype = None
self._parser.EntityDeclHandler = None
self._parser.NotationDeclHandler = None
if has_internal_subset:
if doctype is not None:
doctype.entities._seq = []
doctype.notations._seq = []
self._parser.CommentHandler = None
self._parser.ProcessingInstructionHandler = None
self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
def end_doctype_decl_handler(self):
if self._options.comments:
self._parser.CommentHandler = self.comment_handler
self._parser.ProcessingInstructionHandler = self.pi_handler
if not (self._elem_info or self._filter):
self._finish_end_element = id
def pi_handler(self, target, data):
node = self.document.createProcessingInstruction(target, data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def character_data_handler_cdata(self, data):
childNodes = self.curNode.childNodes
if self._cdata:
if ( self._cdata_continue
and childNodes[-1].nodeType == CDATA_SECTION_NODE):
childNodes[-1].appendData(data)
return
node = self.document.createCDATASection(data)
self._cdata_continue = True
elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
value = node.data + data
d = node.__dict__
d['data'] = d['nodeValue'] = value
return
else:
node = minidom.Text()
d = node.__dict__
d['data'] = d['nodeValue'] = data
d['ownerDocument'] = self.document
_append_child(self.curNode, node)
def character_data_handler(self, data):
childNodes = self.curNode.childNodes
if childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
d = node.__dict__
d['data'] = d['nodeValue'] = node.data + data
return
node = minidom.Text()
d = node.__dict__
d['data'] = d['nodeValue'] = node.data + data
d['ownerDocument'] = self.document
_append_child(self.curNode, node)
def entity_decl_handler(self, entityName, is_parameter_entity, value,
base, systemId, publicId, notationName):
if is_parameter_entity:
# we don't care about parameter entities for the DOM
return
if not self._options.entities:
return
node = self.document._create_entity(entityName, publicId,
systemId, notationName)
if value is not None:
# internal entity
# node *should* be readonly, but we'll cheat
child = self.document.createTextNode(value)
node.childNodes.append(child)
self.document.doctype.entities._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
del self.document.doctype.entities._seq[-1]
def notation_decl_handler(self, notationName, base, systemId, publicId):
node = self.document._create_notation(notationName, publicId, systemId)
self.document.doctype.notations._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT:
del self.document.doctype.notations._seq[-1]
def comment_handler(self, data):
node = self.document.createComment(data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def start_cdata_section_handler(self):
self._cdata = True
self._cdata_continue = False
def end_cdata_section_handler(self):
self._cdata = False
self._cdata_continue = False
def external_entity_ref_handler(self, context, base, systemId, publicId):
return 1
def first_element_handler(self, name, attributes):
if self._filter is None and not self._elem_info:
self._finish_end_element = id
self.getParser().StartElementHandler = self.start_element_handler
self.start_element_handler(name, attributes)
def start_element_handler(self, name, attributes):
node = self.document.createElement(name)
_append_child(self.curNode, node)
self.curNode = node
if attributes:
for i in range(0, len(attributes), 2):
a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
None, EMPTY_PREFIX)
value = attributes[i+1]
d = a.childNodes[0].__dict__
d['data'] = d['nodeValue'] = value
d = a.__dict__
d['value'] = d['nodeValue'] = value
d['ownerDocument'] = self.document
_set_attribute_node(node, a)
if node is not self.document.documentElement:
self._finish_start_element(node)
def _finish_start_element(self, node):
if self._filter:
# To be general, we'd have to call isSameNode(), but this
# is sufficient for minidom:
if node is self.document.documentElement:
return
filt = self._filter.startContainer(node)
if filt == FILTER_REJECT:
# ignore this node & all descendents
Rejecter(self)
elif filt == FILTER_SKIP:
                # ignore this node, but make its children become
# children of the parent node
Skipper(self)
else:
return
self.curNode = node.parentNode
node.parentNode.removeChild(node)
node.unlink()
# If this ever changes, Namespaces.end_element_handler() needs to
# be changed to match.
#
def end_element_handler(self, name):
curNode = self.curNode
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
def _finish_end_element(self, curNode):
info = self._elem_info.get(curNode.tagName)
if info:
self._handle_white_text_nodes(curNode, info)
if self._filter:
if curNode is self.document.documentElement:
return
if self._filter.acceptNode(curNode) == FILTER_REJECT:
self.curNode.removeChild(curNode)
curNode.unlink()
def _handle_white_text_nodes(self, node, info):
if (self._options.whitespace_in_element_content
or not info.isElementContent()):
return
# We have element type information and should remove ignorable
# whitespace; identify for text nodes which contain only
# whitespace.
L = []
for child in node.childNodes:
if child.nodeType == TEXT_NODE and not child.data.strip():
L.append(child)
# Remove ignorable whitespace from the tree.
for child in L:
node.removeChild(child)
def element_decl_handler(self, name, model):
info = self._elem_info.get(name)
if info is None:
self._elem_info[name] = ElementInfo(name, model)
else:
assert info._model is None
info._model = model
def attlist_decl_handler(self, elem, name, type, default, required):
info = self._elem_info.get(elem)
if info is None:
info = ElementInfo(elem)
self._elem_info[elem] = info
info._attr_info.append(
[None, name, None, None, default, 0, type, required])
def xml_decl_handler(self, version, encoding, standalone):
self.document.version = version
self.document.encoding = encoding
# This is still a little ugly, thanks to the pyexpat API. ;-(
if standalone >= 0:
if standalone:
self.document.standalone = True
else:
self.document.standalone = False
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
"""Wrapper around a DOMBuilderFilter which implements the checks
to make the whatToShow filter attribute work."""
__slots__ = 'filter',
def __init__(self, filter):
self.filter = filter
def startContainer(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.startContainer(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"startContainer() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
def acceptNode(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.acceptNode(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val == FILTER_SKIP:
# move all child nodes to the parent, and remove this node
parent = node.parentNode
for child in node.childNodes[:]:
parent.appendChild(child)
# node is handled by the caller
return FILTER_REJECT
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"acceptNode() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
_nodetype_mask = {
Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
}
class FilterCrutch(object):
__slots__ = '_builder', '_level', '_old_start', '_old_end'
def __init__(self, builder):
self._level = 0
self._builder = builder
parser = builder._parser
self._old_start = parser.StartElementHandler
self._old_end = parser.EndElementHandler
parser.StartElementHandler = self.start_element_handler
parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
__slots__ = ()
def __init__(self, builder):
FilterCrutch.__init__(self, builder)
parser = builder._parser
for name in ("ProcessingInstructionHandler",
"CommentHandler",
"CharacterDataHandler",
"StartCdataSectionHandler",
"EndCdataSectionHandler",
"ExternalEntityRefHandler",
):
setattr(parser, name, None)
def start_element_handler(self, *args):
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# restore the old handlers
parser = self._builder._parser
self._builder.install(parser)
parser.StartElementHandler = self._old_start
parser.EndElementHandler = self._old_end
else:
self._level = self._level - 1
class Skipper(FilterCrutch):
__slots__ = ()
def start_element_handler(self, *args):
node = self._builder.curNode
self._old_start(*args)
if self._builder.curNode is not node:
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# We're popping back out of the node we're skipping, so we
# shouldn't need to do anything but reset the handlers.
self._builder._parser.StartElementHandler = self._old_start
self._builder._parser.EndElementHandler = self._old_end
self._builder = None
else:
self._level = self._level - 1
self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
"""Builder which constructs document fragments given XML source
text and a context node.
The context node is expected to provide information about the
namespace declarations which are in scope at the start of the
fragment.
"""
def __init__(self, context, options=None):
if context.nodeType == DOCUMENT_NODE:
self.originalDocument = context
self.context = context
else:
self.originalDocument = context.ownerDocument
self.context = context
ExpatBuilder.__init__(self, options)
def reset(self):
ExpatBuilder.reset(self)
self.fragment = None
def parseFile(self, file):
"""Parse a document fragment from a file object, returning the
fragment node."""
return self.parseString(file.read())
def parseString(self, string):
"""Parse a document fragment from a string, returning the
fragment node."""
self._source = string
parser = self.getParser()
doctype = self.originalDocument.doctype
ident = ""
if doctype:
subset = doctype.internalSubset or self._getDeclarations()
if doctype.publicId:
ident = ('PUBLIC "%s" "%s"'
% (doctype.publicId, doctype.systemId))
elif doctype.systemId:
ident = 'SYSTEM "%s"' % doctype.systemId
else:
subset = ""
nsattrs = self._getNSattrs() # get ns decls from node's ancestors
document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
try:
parser.Parse(document, 1)
except:
self.reset()
raise
fragment = self.fragment
self.reset()
## self._parser = None
return fragment
def _getDeclarations(self):
"""Re-create the internal subset from the DocumentType node.
This is only needed if we don't already have the
internalSubset as a string.
"""
doctype = self.context.ownerDocument.doctype
s = ""
if doctype:
for i in range(doctype.notations.length):
notation = doctype.notations.item(i)
if s:
s = s + "\n "
s = "%s<!NOTATION %s" % (s, notation.nodeName)
if notation.publicId:
s = '%s PUBLIC "%s"\n "%s">' \
% (s, notation.publicId, notation.systemId)
else:
s = '%s SYSTEM "%s">' % (s, notation.systemId)
for i in range(doctype.entities.length):
entity = doctype.entities.item(i)
if s:
s = s + "\n "
s = "%s<!ENTITY %s" % (s, entity.nodeName)
if entity.publicId:
s = '%s PUBLIC "%s"\n "%s"' \
% (s, entity.publicId, entity.systemId)
elif entity.systemId:
s = '%s SYSTEM "%s"' % (s, entity.systemId)
else:
s = '%s "%s"' % (s, entity.firstChild.data)
if entity.notationName:
s = "%s NOTATION %s" % (s, entity.notationName)
s = s + ">"
return s
def _getNSattrs(self):
return ""
def external_entity_ref_handler(self, context, base, systemId, publicId):
if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
# this entref is the one that we made to put the subtree
# in; all of our given input is parsed in here.
old_document = self.document
old_cur_node = self.curNode
parser = self._parser.ExternalEntityParserCreate(context)
# put the real document back, parse into the fragment to return
self.document = self.originalDocument
self.fragment = self.document.createDocumentFragment()
self.curNode = self.fragment
try:
parser.Parse(self._source, 1)
finally:
self.curNode = old_cur_node
self.document = old_document
self._source = None
return -1
else:
return ExpatBuilder.external_entity_ref_handler(
self, context, base, systemId, publicId)
class Namespaces:
"""Mix-in class for builders; adds support for namespaces."""
def _initNamespaces(self):
# list of (prefix, uri) ns declarations. Namespace attrs are
# constructed from this and added to the element's attrs.
self._ns_ordered_prefixes = []
def createParser(self):
"""Create a new namespace-handling parser."""
parser = expat.ParserCreate(namespace_separator=" ")
parser.namespace_prefixes = True
return parser
def install(self, parser):
"""Insert the namespace-handlers onto the parser."""
ExpatBuilder.install(self, parser)
if self._options.namespace_declarations:
parser.StartNamespaceDeclHandler = (
self.start_namespace_decl_handler)
def start_namespace_decl_handler(self, prefix, uri):
"""Push this namespace declaration on our storage."""
self._ns_ordered_prefixes.append((prefix, uri))
def start_element_handler(self, name, attributes):
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
else:
uri = EMPTY_NAMESPACE
qname = name
localname = None
prefix = EMPTY_PREFIX
node = minidom.Element(qname, uri, prefix, localname)
node.ownerDocument = self.document
_append_child(self.curNode, node)
self.curNode = node
if self._ns_ordered_prefixes:
for prefix, uri in self._ns_ordered_prefixes:
if prefix:
a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
XMLNS_NAMESPACE, prefix, "xmlns")
else:
a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
"xmlns", EMPTY_PREFIX)
d = a.childNodes[0].__dict__
d['data'] = d['nodeValue'] = uri
d = a.__dict__
d['value'] = d['nodeValue'] = uri
d['ownerDocument'] = self.document
_set_attribute_node(node, a)
del self._ns_ordered_prefixes[:]
if attributes:
_attrs = node._attrs
_attrsNS = node._attrsNS
for i in range(0, len(attributes), 2):
aname = attributes[i]
value = attributes[i+1]
if ' ' in aname:
uri, localname, prefix, qname = _parse_ns_name(self, aname)
a = minidom.Attr(qname, uri, localname, prefix)
_attrs[qname] = a
_attrsNS[(uri, localname)] = a
else:
a = minidom.Attr(aname, EMPTY_NAMESPACE,
aname, EMPTY_PREFIX)
_attrs[aname] = a
_attrsNS[(EMPTY_NAMESPACE, aname)] = a
d = a.childNodes[0].__dict__
d['data'] = d['nodeValue'] = value
d = a.__dict__
d['ownerDocument'] = self.document
d['value'] = d['nodeValue'] = value
d['ownerElement'] = node
if __debug__:
# This only adds some asserts to the original
# end_element_handler(), so we only define this when -O is not
# used. If changing one, be sure to check the other to see if
# it needs to be changed as well.
#
def end_element_handler(self, name):
curNode = self.curNode
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
assert (curNode.namespaceURI == uri
and curNode.localName == localname
and curNode.prefix == prefix), \
"element stack messed up! (namespace)"
else:
assert curNode.nodeName == name, \
"element stack messed up - bad nodeName"
assert curNode.namespaceURI == EMPTY_NAMESPACE, \
"element stack messed up - bad namespaceURI"
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
"""Document builder that supports namespaces."""
def reset(self):
ExpatBuilder.reset(self)
self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
"""Fragment builder that supports namespaces."""
def reset(self):
FragmentBuilder.reset(self)
self._initNamespaces()
def _getNSattrs(self):
"""Return string of namespace attributes from this element and
ancestors."""
# XXX This needs to be re-written to walk the ancestors of the
# context to build up the namespace information from
# declarations, elements, and attributes found in context.
# Otherwise we have to store a bunch more data on the DOM
# (though that *might* be more reliable -- not clear).
attrs = ""
context = self.context
L = []
while context:
if hasattr(context, '_ns_prefix_uri'):
for prefix, uri in context._ns_prefix_uri.items():
# add every new NS decl from context to L and attrs string
if prefix in L:
continue
L.append(prefix)
if prefix:
declname = "xmlns:" + prefix
else:
declname = "xmlns"
if attrs:
attrs = "%s\n %s='%s'" % (attrs, declname, uri)
else:
attrs = " %s='%s'" % (declname, uri)
context = context.parentNode
return attrs
class ParseEscape(Exception):
"""Exception raised to short-circuit parsing in InternalSubsetExtractor."""
pass
class InternalSubsetExtractor(ExpatBuilder):
"""XML processor which can rip out the internal document type subset."""
subset = None
def getSubset(self):
"""Return the internal subset as a string."""
return self.subset
def parseFile(self, file):
try:
ExpatBuilder.parseFile(self, file)
except ParseEscape:
pass
def parseString(self, string):
try:
ExpatBuilder.parseString(self, string)
except ParseEscape:
pass
def install(self, parser):
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.start_element_handler
def start_doctype_decl_handler(self, name, publicId, systemId,
has_internal_subset):
if has_internal_subset:
parser = self.getParser()
self.subset = []
parser.DefaultHandler = self.subset.append
parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
else:
raise ParseEscape()
def end_doctype_decl_handler(self):
s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
self.subset = s
raise ParseEscape()
def start_element_handler(self, name, attrs):
raise ParseEscape()
def parse(file, namespaces=True):
"""Parse a document, returning the resulting Document node.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseString(string, namespaces=True):
"""Parse a document from a string, returning the resulting
Document node.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
"""Parse a fragment of a document, given the context from which it
was originally extracted. context should be the parent of the
node(s) which are in the fragment.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseFragmentString(string, context, namespaces=True):
"""Parse a fragment of a document from a string, given the context
from which it was originally extracted. context should be the
parent of the node(s) which are in the fragment.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
return builder.parseString(string)
def makeBuilder(options):
"""Create a builder based on an Options object."""
if options.namespaces:
return ExpatBuilderNS(options)
else:
return ExpatBuilder(options)
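# --- Editor's note: a minimal, hedged usage sketch of the module-level helpers
# above; it is not part of the original module and the XML snippet is
# hypothetical. parseFragment()/parseFragmentString() work analogously but also
# need a context node that supplies the in-scope namespace declarations.
if __name__ == '__main__':
    doc = parseString("<root><child attr='1'>text</child></root>")
    print(doc.documentElement.tagName)                            # 'root'
    print(doc.documentElement.firstChild.getAttribute('attr'))    # '1'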
| bsd-3-clause |
JingJunYin/tensorflow | tensorflow/contrib/kfac/python/ops/op_queue.py | 23 | 2436 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper for choosing which op to run next in a distributed setting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import ops as tf_ops
class OpQueue(object):
"""Class for choosing which Op to run next.
Constructs an infinitely repeating sequence of Ops in shuffled order.
In K-FAC, this can be used to distribute inverse update operations among
workers.
"""
def __init__(self, ops, seed=None):
"""Initializes an OpQueue.
Args:
ops: list of TensorFlow Ops. Ops to be selected from. All workers must
initialize with the same set of ops.
seed: int or None. Random seed used when shuffling order of ops.
"""
self._ops_by_name = {op.name: op for op in ops}
# Construct a (shuffled) Dataset with Op names.
op_names = tf_ops.convert_to_tensor(list(sorted(op.name for op in ops)))
op_names_dataset = (dataset_ops.Dataset.from_tensor_slices(op_names)
.shuffle(len(ops), seed=seed).repeat())
self._next_op_name = op_names_dataset.make_one_shot_iterator().get_next()
@property
def ops(self):
"""Ops this OpQueue can return in next_op()."""
return self._ops_by_name.values()
def next_op(self, sess):
"""Chooses which op to run next.
Note: This call will make a call to sess.run().
Args:
sess: tf.Session.
Returns:
Next Op chosen from 'ops'.
"""
# In Python 3, type(next_op_name) == bytes. Calling bytes.decode('ascii')
# returns a str.
next_op_name = sess.run(self._next_op_name).decode('ascii')
return self._ops_by_name[next_op_name]
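# --- Editor's note: a hedged usage sketch, not part of the original file. The
# op names are hypothetical placeholders for K-FAC's inverse-update ops, and a
# TF1-style graph/session is assumed to match the contrib-era APIs used above.
if __name__ == '__main__':
    import tensorflow as tf

    ops = [tf.no_op(name='inverse_update_a'), tf.no_op(name='inverse_update_b')]
    queue = OpQueue(ops, seed=0)
    with tf.Session() as sess:
        for _ in range(4):
            # Ops come back in a shuffled order that repeats indefinitely.
            sess.run(queue.next_op(sess))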
| apache-2.0 |
tangjonathan/HKQuiz | node_modules/pryjs/node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/rtf.py | 47 | 5058 | # -*- coding: utf-8 -*-
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_int_opt, _surrogatepair
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
"""
Format tokens as RTF markup. This formatter automatically outputs full RTF
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft® Word® documents.
Please note that ``encoding`` and ``outencoding`` options are ignored.
The RTF format is ASCII natively, but handles unicode characters correctly
thanks to escape sequences.
.. versionadded:: 0.6
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`fontface`
The font family used, for example ``Bitstream Vera Sans``. Defaults to
some generic font which is supposed to have fixed width.
`fontsize`
Size of the font used. Size is specified in half points. The
default is 24 half-points, giving a size 12 font.
.. versionadded:: 2.0
"""
name = 'RTF'
aliases = ['rtf']
filenames = ['*.rtf']
unicodeoutput = False
def __init__(self, **options):
"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
self.fontsize = get_int_opt(options, 'fontsize', 0)
def _escape(self, text):
return text.replace('\\', '\\\\') \
.replace('{', '\\{') \
.replace('}', '\\}')
def _escape_text(self, text):
# short-circuit on empty strings; this should give a small performance improvement
if not text:
return ''
# escape text
text = self._escape(text)
buf = []
for c in text:
cn = ord(c)
if cn < (2**7):
# ASCII character
buf.append(str(c))
elif (2**7) <= cn < (2**16):
# single unicode escape sequence
buf.append(r'{\u%d}' % cn)
elif (2**16) <= cn:
# RTF limits unicode to 16 bits.
# Force surrogate pairs
h,l = _surrogatepair(cn)
buf.append(r'{\u%d}{\u%d}' % (h,l))
return ''.join(buf).replace('\n', '\\par\n')
def format_unencoded(self, tokensource, outfile):
# rtf 1.8 header
outfile.write(r'{\rtf1\ansi\uc0\deff0'
r'{\fonttbl{\f0\fmodern\fprq1\fcharset0%s;}}'
r'{\colortbl;' % (self.fontface and
' ' + self._escape(self.fontface) or
''))
# convert colors and save them in a mapping to access them later.
color_mapping = {}
offset = 1
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
outfile.write(r'\red%d\green%d\blue%d;' % (
int(color[0:2], 16),
int(color[2:4], 16),
int(color[4:6], 16)
))
offset += 1
outfile.write(r'}\f0 ')
if self.fontsize:
outfile.write(r'\fs%d' % (self.fontsize))
# highlight stream
for ttype, value in tokensource:
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
buf.append(r'\cb%d' % color_mapping[style['bgcolor']])
if style['color']:
buf.append(r'\cf%d' % color_mapping[style['color']])
if style['bold']:
buf.append(r'\b')
if style['italic']:
buf.append(r'\i')
if style['underline']:
buf.append(r'\ul')
if style['border']:
buf.append(r'\chbrdr\chcfpat%d' %
color_mapping[style['border']])
start = ''.join(buf)
if start:
outfile.write('{%s ' % start)
outfile.write(self._escape_text(value))
if start:
outfile.write('}')
outfile.write('}')
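# --- Editor's note: a hedged usage sketch, not part of the original file. It
# relies only on documented Pygments entry points; the highlighted snippet and
# the fontsize value are arbitrary.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer

    rtf_doc = highlight('print("hello")', PythonLexer(), RtfFormatter(fontsize=24))
    print(rtf_doc[:30])   # begins with the RTF header: {\rtf1\ansi\uc0\deff0...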
| mit |
nhicher/ansible | test/units/module_utils/facts/test_collector.py | 78 | 26241 | # This file is part of Ansible
# -*- coding: utf-8 -*-
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from collections import defaultdict
import pprint
# for testing
from units.compat import unittest
from ansible.module_utils.facts import collector
from ansible.module_utils.facts import default_collectors
class TestFindCollectorsForPlatform(unittest.TestCase):
def test(self):
compat_platforms = [{'system': 'Generic'}]
res = collector.find_collectors_for_platform(default_collectors.collectors,
compat_platforms)
for coll_class in res:
self.assertIn(coll_class._platform, ('Generic'))
def test_linux(self):
compat_platforms = [{'system': 'Linux'}]
res = collector.find_collectors_for_platform(default_collectors.collectors,
compat_platforms)
for coll_class in res:
self.assertIn(coll_class._platform, ('Linux'))
def test_linux_or_generic(self):
compat_platforms = [{'system': 'Generic'}, {'system': 'Linux'}]
res = collector.find_collectors_for_platform(default_collectors.collectors,
compat_platforms)
for coll_class in res:
self.assertIn(coll_class._platform, ('Generic', 'Linux'))
class TestSelectCollectorNames(unittest.TestCase):
def _assert_equal_detail(self, obj1, obj2, msg=None):
msg = 'objects are not equal\n%s\n\n!=\n\n%s' % (pprint.pformat(obj1), pprint.pformat(obj2))
return self.assertEqual(obj1, obj2, msg)
def test(self):
collector_names = ['distribution', 'all_ipv4_addresses',
'local', 'pkg_mgr']
all_fact_subsets = self._all_fact_subsets()
res = collector.select_collector_classes(collector_names,
all_fact_subsets)
expected = [default_collectors.DistributionFactCollector,
default_collectors.PkgMgrFactCollector]
self._assert_equal_detail(res, expected)
def test_default_collectors(self):
platform_info = {'system': 'Generic'}
compat_platforms = [platform_info]
collectors_for_platform = collector.find_collectors_for_platform(default_collectors.collectors,
compat_platforms)
all_fact_subsets, aliases_map = collector.build_fact_id_to_collector_map(collectors_for_platform)
all_valid_subsets = frozenset(all_fact_subsets.keys())
collector_names = collector.get_collector_names(valid_subsets=all_valid_subsets,
aliases_map=aliases_map,
platform_info=platform_info)
complete_collector_names = collector._solve_deps(collector_names, all_fact_subsets)
dep_map = collector.build_dep_data(complete_collector_names, all_fact_subsets)
ordered_deps = collector.tsort(dep_map)
ordered_collector_names = [x[0] for x in ordered_deps]
res = collector.select_collector_classes(ordered_collector_names,
all_fact_subsets)
self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) >
res.index(default_collectors.DistributionFactCollector),
res)
self.assertTrue(res.index(default_collectors.ServiceMgrFactCollector) >
res.index(default_collectors.PlatformFactCollector),
res)
def _all_fact_subsets(self, data=None):
all_fact_subsets = defaultdict(list)
_data = {'pkg_mgr': [default_collectors.PkgMgrFactCollector],
'distribution': [default_collectors.DistributionFactCollector],
'network': [default_collectors.LinuxNetworkCollector]}
data = data or _data
for key, value in data.items():
all_fact_subsets[key] = value
return all_fact_subsets
class TestGetCollectorNames(unittest.TestCase):
def test_none(self):
res = collector.get_collector_names()
self.assertIsInstance(res, set)
self.assertEqual(res, set([]))
def test_empty_sets(self):
res = collector.get_collector_names(valid_subsets=frozenset([]),
minimal_gather_subset=frozenset([]),
gather_subset=[])
self.assertIsInstance(res, set)
self.assertEqual(res, set([]))
def test_empty_valid_and_min_with_all_gather_subset(self):
res = collector.get_collector_names(valid_subsets=frozenset([]),
minimal_gather_subset=frozenset([]),
gather_subset=['all'])
self.assertIsInstance(res, set)
self.assertEqual(res, set([]))
def test_one_valid_with_all_gather_subset(self):
valid_subsets = frozenset(['my_fact'])
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=frozenset([]),
gather_subset=['all'])
self.assertIsInstance(res, set)
self.assertEqual(res, set(['my_fact']))
def _compare_res(self, gather_subset1, gather_subset2,
valid_subsets=None, min_subset=None):
valid_subsets = valid_subsets or frozenset()
minimal_gather_subset = min_subset or frozenset()
res1 = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=gather_subset1)
res2 = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=gather_subset2)
return res1, res2
def test_not_all_other_order(self):
valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['min_fact'])
res1, res2 = self._compare_res(['!all', 'whatever'],
['whatever', '!all'],
valid_subsets=valid_subsets,
min_subset=minimal_gather_subset)
self.assertEqual(res1, res2)
self.assertEqual(res1, set(['min_fact', 'whatever']))
def test_not_all_other_order_min(self):
valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['min_fact'])
res1, res2 = self._compare_res(['!min_fact', 'whatever'],
['whatever', '!min_fact'],
valid_subsets=valid_subsets,
min_subset=minimal_gather_subset)
self.assertEqual(res1, res2)
self.assertEqual(res1, set(['whatever']))
def test_one_minimal_with_all_gather_subset(self):
my_fact = 'my_fact'
valid_subsets = frozenset([my_fact])
minimal_gather_subset = valid_subsets
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=['all'])
self.assertIsInstance(res, set)
self.assertEqual(res, set(['my_fact']))
def test_with_all_gather_subset(self):
valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['my_fact'])
# with 'all' in gather_subset, every valid subset should be returned
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=['all'])
self.assertIsInstance(res, set)
self.assertEqual(res, set(['my_fact', 'something_else', 'whatever']))
def test_one_minimal_with_not_all_gather_subset(self):
valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['my_fact'])
# even with '!all', the minimal_gather_subset should be returned
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=['!all'])
self.assertIsInstance(res, set)
self.assertEqual(res, set(['my_fact']))
def test_gather_subset_excludes(self):
valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['min_fact', 'min_another'])
# excluded subsets are dropped, while the rest of the minimal_gather_subset is still returned
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
# gather_subset=set(['all', '!my_fact', '!whatever']))
# gather_subset=['all', '!my_fact', '!whatever'])
gather_subset=['!min_fact', '!whatever'])
self.assertIsInstance(res, set)
# min_another is in minimal_gather_subset, so always returned
self.assertEqual(res, set(['min_another']))
def test_gather_subset_excludes_ordering(self):
valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['my_fact'])
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=['!all', 'whatever'])
self.assertIsInstance(res, set)
# '!all' clears the default subsets, but the minimal_gather_subset and any
# explicitly requested subsets ('whatever') are still returned
self.assertEqual(res, set(['my_fact', 'whatever']))
def test_gather_subset_excludes_min(self):
valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['min_fact'])
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=['whatever', '!min'])
self.assertIsInstance(res, set)
# '!min' drops the minimal_gather_subset, so only the explicitly
# requested 'whatever' is returned
self.assertEqual(res, set(['whatever']))
def test_gather_subset_excludes_min_and_all(self):
valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
minimal_gather_subset = frozenset(['min_fact'])
res = collector.get_collector_names(valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=['whatever', '!all', '!min'])
self.assertIsInstance(res, set)
# '!all' and '!min' drop both the defaults and the minimal subset,
# leaving only the explicitly requested 'whatever'
self.assertEqual(res, set(['whatever']))
def test_invalid_gather_subset(self):
valid_subsets = frozenset(['my_fact', 'something_else'])
minimal_gather_subset = frozenset(['my_fact'])
self.assertRaisesRegexp(TypeError,
r'Bad subset .* given to Ansible.*allowed\:.*all,.*my_fact.*',
collector.get_collector_names,
valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=['my_fact', 'not_a_valid_gather_subset'])
class TestFindUnresolvedRequires(unittest.TestCase):
def test(self):
names = ['network', 'virtual', 'env']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
res = collector.find_unresolved_requires(names, all_fact_subsets)
# pprint.pprint(res)
self.assertIsInstance(res, set)
self.assertEqual(res, set(['platform', 'distribution']))
def test_resolved(self):
names = ['network', 'virtual', 'env', 'platform', 'distribution']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'distribution': [default_collectors.DistributionFactCollector],
'platform': [default_collectors.PlatformFactCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
res = collector.find_unresolved_requires(names, all_fact_subsets)
# pprint.pprint(res)
self.assertIsInstance(res, set)
self.assertEqual(res, set())
class TestBuildDepData(unittest.TestCase):
def test(self):
names = ['network', 'virtual', 'env']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
res = collector.build_dep_data(names, all_fact_subsets)
# pprint.pprint(dict(res))
self.assertIsInstance(res, defaultdict)
self.assertEqual(dict(res),
{'network': set(['platform', 'distribution']),
'virtual': set(),
'env': set()})
class TestSolveDeps(unittest.TestCase):
def test_no_solution(self):
unresolved = set(['required_thing1', 'required_thing2'])
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
self.assertRaises(collector.CollectorNotFoundError,
collector._solve_deps,
unresolved,
all_fact_subsets)
def test(self):
unresolved = set(['env', 'network'])
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector],
'platform': [default_collectors.PlatformFactCollector],
'distribution': [default_collectors.DistributionFactCollector]}
res = collector.resolve_requires(unresolved, all_fact_subsets)
res = collector._solve_deps(unresolved, all_fact_subsets)
self.assertIsInstance(res, set)
for goal in unresolved:
self.assertIn(goal, res)
class TestResolveRequires(unittest.TestCase):
def test_no_resolution(self):
unresolved = ['required_thing1', 'required_thing2']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
self.assertRaisesRegexp(collector.UnresolvedFactDep,
'unresolved fact dep.*required_thing2',
collector.resolve_requires,
unresolved, all_fact_subsets)
def test(self):
unresolved = ['env', 'network']
all_fact_subsets = {'env': [default_collectors.EnvFactCollector],
'network': [default_collectors.LinuxNetworkCollector],
'virtual': [default_collectors.LinuxVirtualCollector]}
res = collector.resolve_requires(unresolved, all_fact_subsets)
for goal in unresolved:
self.assertIn(goal, res)
def test_exception(self):
unresolved = ['required_thing1']
all_fact_subsets = {}
try:
collector.resolve_requires(unresolved, all_fact_subsets)
except collector.UnresolvedFactDep as exc:
self.assertIn(unresolved[0], '%s' % exc)
class TestTsort(unittest.TestCase):
def test(self):
dep_map = {'network': set(['distribution', 'platform']),
'virtual': set(),
'platform': set(['what_platform_wants']),
'what_platform_wants': set(),
'network_stuff': set(['network'])}
res = collector.tsort(dep_map)
# pprint.pprint(res)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
self.assertTrue(names.index('network_stuff') > names.index('network'))
self.assertTrue(names.index('platform') > names.index('what_platform_wants'))
self.assertTrue(names.index('network') > names.index('platform'))
def test_cycles(self):
dep_map = {'leaf1': set(),
'leaf2': set(),
'node1': set(['node2']),
'node2': set(['node3']),
'node3': set(['node1'])}
self.assertRaises(collector.CycleFoundInFactDeps,
collector.tsort,
dep_map)
def test_just_nodes(self):
dep_map = {'leaf1': set(),
'leaf4': set(),
'leaf3': set(),
'leaf2': set()}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
# not a lot to assert here, any order of the
# results is valid
self.assertEqual(set(names), set(dep_map.keys()))
def test_self_deps(self):
dep_map = {'node1': set(['node1']),
'node2': set(['node2'])}
self.assertRaises(collector.CycleFoundInFactDeps,
collector.tsort,
dep_map)
def test_unsolvable(self):
dep_map = {'leaf1': set(),
'node2': set(['leaf2'])}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
self.assertEqual(set(names), set(dep_map.keys()))
def test_chain(self):
dep_map = {'leaf1': set(['leaf2']),
'leaf2': set(['leaf3']),
'leaf3': set(['leaf4']),
'leaf4': set(),
'leaf5': set(['leaf1'])}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
self.assertEqual(set(names), set(dep_map.keys()))
def test_multi_pass(self):
dep_map = {'leaf1': set(),
'leaf2': set(['leaf3', 'leaf1', 'leaf4', 'leaf5']),
'leaf3': set(['leaf4', 'leaf1']),
'leaf4': set(['leaf1']),
'leaf5': set(['leaf1'])}
res = collector.tsort(dep_map)
self.assertIsInstance(res, list)
names = [x[0] for x in res]
self.assertEqual(set(names), set(dep_map.keys()))
self.assertTrue(names.index('leaf1') < names.index('leaf2'))
for leaf in ('leaf2', 'leaf3', 'leaf4', 'leaf5'):
self.assertTrue(names.index('leaf1') < names.index(leaf))
class TestCollectorClassesFromGatherSubset(unittest.TestCase):
maxDiff = None
def _classes(self,
all_collector_classes=None,
valid_subsets=None,
minimal_gather_subset=None,
gather_subset=None,
gather_timeout=None,
platform_info=None):
platform_info = platform_info or {'system': 'Linux'}
return collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes,
valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=gather_subset,
gather_timeout=gather_timeout,
platform_info=platform_info)
def test_no_args(self):
res = self._classes()
self.assertIsInstance(res, list)
self.assertEqual(res, [])
def test_not_all(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['!all'])
self.assertIsInstance(res, list)
self.assertEqual(res, [])
def test_all(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['all'])
self.assertIsInstance(res, list)
def test_hardware(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['hardware'])
self.assertIsInstance(res, list)
self.assertIn(default_collectors.PlatformFactCollector, res)
self.assertIn(default_collectors.LinuxHardwareCollector, res)
self.assertTrue(res.index(default_collectors.LinuxHardwareCollector) >
res.index(default_collectors.PlatformFactCollector))
def test_network(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['network'])
self.assertIsInstance(res, list)
self.assertIn(default_collectors.DistributionFactCollector, res)
self.assertIn(default_collectors.PlatformFactCollector, res)
self.assertIn(default_collectors.LinuxNetworkCollector, res)
self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) >
res.index(default_collectors.PlatformFactCollector))
self.assertTrue(res.index(default_collectors.LinuxNetworkCollector) >
res.index(default_collectors.DistributionFactCollector))
# self.assertEqual(set(res, [default_collectors.DistributionFactCollector,
# default_collectors.PlatformFactCollector,
# default_collectors.LinuxNetworkCollector])
def test_env(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['env'])
self.assertIsInstance(res, list)
self.assertEqual(res, [default_collectors.EnvFactCollector])
def test_facter(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=set(['env', 'facter']))
self.assertIsInstance(res, list)
self.assertEqual(set(res),
set([default_collectors.EnvFactCollector,
default_collectors.FacterFactCollector]))
def test_facter_ohai(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=set(['env', 'facter', 'ohai']))
self.assertIsInstance(res, list)
self.assertEqual(set(res),
set([default_collectors.EnvFactCollector,
default_collectors.FacterFactCollector,
default_collectors.OhaiFactCollector]))
def test_just_facter(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=set(['facter']))
self.assertIsInstance(res, list)
self.assertEqual(set(res),
set([default_collectors.FacterFactCollector]))
def test_collector_specified_multiple_times(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['platform', 'all', 'machine'])
self.assertIsInstance(res, list)
self.assertIn(default_collectors.PlatformFactCollector,
res)
def test_unknown_collector(self):
# something claims 'unknown_collector' is a valid gather_subset, but there is
# no FactCollector mapped to 'unknown_collector'
self.assertRaisesRegexp(TypeError,
r'Bad subset.*unknown_collector.*given to Ansible.*allowed\:.*all,.*env.*',
self._classes,
all_collector_classes=default_collectors.collectors,
gather_subset=['env', 'unknown_collector'])
| gpl-3.0 |
caseylucas/ansible-modules-core | cloud/amazon/route53.py | 40 | 22225 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: route53
version_added: "1.3"
short_description: add or delete entries in Amazon's Route53 DNS service
description:
- Creates and deletes DNS records in Amazon's Route53 service
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'get', 'create', 'delete' ]
zone:
description:
- The DNS zone to modify
required: true
hosted_zone_id:
description:
- The Hosted Zone ID of the DNS zone to modify
required: false
version_added: "2.0"
default: null
record:
description:
- The full DNS record to create or delete
required: true
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
type:
description:
- The type of DNS record to create
required: true
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS', 'SOA' ]
alias:
description:
- Indicates if this is an alias record.
required: false
version_added: "1.9"
default: False
choices: [ 'True', 'False' ]
alias_hosted_zone_id:
description:
- The hosted zone identifier.
required: false
version_added: "1.9"
default: null
alias_evaluate_target_health:
description:
- Whether or not to evaluate an alias target health. Useful for aliases to Elastic Load Balancers.
required: false
version_added: "2.1"
default: false
value:
description:
- The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it.
required: false
default: null
overwrite:
description:
- Whether an existing record should be overwritten on create if values do not match
required: false
default: null
retry_interval:
description:
- In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long.
required: false
default: 500
private_zone:
description:
- If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone.
required: false
default: false
version_added: "1.9"
identifier:
description:
- Required for weighted, latency-based and failover resource record sets only. An identifier
that differentiates among multiple resource record sets that have the
same combination of DNS name and type.
required: false
default: null
version_added: "2.0"
weight:
description:
- Weighted resource record sets only. Among resource record sets that
have the same combination of DNS name and type, a value that
determines what portion of traffic for the current resource record set
is routed to the associated location.
required: false
default: null
version_added: "2.0"
region:
description:
- Latency-based resource record sets only. Among resource record sets
that have the same combination of DNS name and type, a value that
determines which region this should be associated with for the
latency-based routing
required: false
default: null
version_added: "2.0"
health_check:
description:
- Health check to associate with this record
required: false
default: null
version_added: "2.0"
failover:
description:
- Failover resource record sets only. Whether this is the primary or
secondary resource record set. Allowed values are PRIMARY and SECONDARY
required: false
default: null
version_added: "2.0"
vpc_id:
description:
- "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
- This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
required: false
default: null
version_added: "2.0"
wait:
description:
- Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
required: false
default: no
version_added: "2.1"
wait_timeout:
description:
- How long to wait for the changes to be replicated, in seconds.
required: false
default: 300
version_added: "2.1"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Mike Buzzetti <[email protected]>"
extends_documentation_fragment: aws
'''
# FIXME: the command stuff should have a more state like configuration alias -- MPD
EXAMPLES = '''
# Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
- route53:
command: create
zone: foo.com
record: new.foo.com
type: A
ttl: 7200
value: 1.1.1.1,2.2.2.2,3.3.3.3
wait: yes
# Retrieve the details for new.foo.com
- route53:
command: get
zone: foo.com
record: new.foo.com
type: A
register: rec
# Delete new.foo.com A record using the results from the get command
- route53:
command: delete
zone: foo.com
record: "{{ rec.set.record }}"
ttl: "{{ rec.set.ttl }}"
type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
# Add an AAAA record. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "AAAA"
ttl: "7200"
value: "::1"
# Add a SRV record with multiple fields for a service on port 22222
# For more information on SRV records see:
# https://en.wikipedia.org/wiki/SRV_record
- route53:
command: "create"
"zone": "foo.com"
"record": "_example-service._tcp.foo.com"
"type": "SRV"
"value": ["0 0 22222 host1.foo.com", "0 0 22222 host2.foo.com"]
# Add a TXT record. Note that TXT and SPF records must be surrounded
# by quotes when sent to Route 53:
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "TXT"
ttl: "7200"
value: '"bar"'
# Add an alias record that points to an Amazon ELB:
- route53:
command=create
zone=foo.com
record=elb.foo.com
type=A
value="{{ elb_dns_name }}"
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
# Retrieve the details for elb.foo.com
- route53:
command: get
zone: foo.com
record: elb.foo.com
type: A
register: rec
# Delete an alias record using the results from the get command
- route53:
command: delete
zone: foo.com
record: "{{ rec.set.record }}"
ttl: "{{ rec.set.ttl }}"
type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
alias: True
alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
# Add an alias record that points to an Amazon ELB and evaluates it health:
- route53:
command=create
zone=foo.com
record=elb.foo.com
type=A
value="{{ elb_dns_name }}"
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
alias_evaluate_target_health=True
# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53:
command: "create"
zone: "foo.com"
hosted_zone_id: "Z2AABBCCDDEEFF"
record: "localhost.foo.com"
type: "AAAA"
ttl: "7200"
value: "::1"
# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53:
command: "create"
zone: "foo.com"
hosted_zone_id: "Z2AABBCCDDEEFF"
record: "localhost.foo.com"
type: "AAAA"
ttl: "7200"
value: "::1"
# Use a routing policy to distribute traffic:
- route53:
command: "create"
zone: "foo.com"
record: "www.foo.com"
type: "CNAME"
value: "host1.foo.com"
ttl: 30
# Routing policy
identifier: "host1@www"
weight: 100
health_check: "d994b780-3150-49fd-9205-356abdd42e75"
'''
MINIMUM_BOTO_VERSION = '2.28.0'
WAIT_RETRY_SLEEP = 5 # how many seconds to wait between propagation status polls
import time
import distutils.version
try:
import boto
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection
from boto.route53.record import Record, ResourceRecordSets
from boto.route53.status import Status
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class TimeoutError(Exception):
pass
def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id):
"""Finds a zone by name or zone_id"""
for zone in conn.get_zones():
# only save this zone id if the private status of the zone matches
# the private_zone_in boolean specified in the params
private_zone = module.boolean(zone.config.get('PrivateZone', False))
if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id):
if want_vpc_id:
# NOTE: These details aren't available in other boto methods, hence the necessary
# extra API call
zone_details = conn.get_hosted_zone(zone.id)['GetHostedZoneResponse']
# this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
if isinstance(zone_details['VPCs'], dict):
if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id:
return zone
else: # Forward compatibility for when boto fixes that bug
if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
return zone
else:
return zone
return None
def commit(changes, retry_interval, wait, wait_timeout):
"""Commit changes, but retry PriorRequestNotComplete errors."""
result = None
retry = 10
while True:
try:
retry -= 1
result = changes.commit()
break
except boto.route53.exception.DNSServerError as e:
code = e.body.split("<Code>")[1]
code = code.split("</Code>")[0]
if code != 'PriorRequestNotComplete' or retry < 0:
raise e
time.sleep(float(retry_interval))
if wait:
timeout_time = time.time() + wait_timeout
connection = changes.connection
change = result['ChangeResourceRecordSetsResponse']['ChangeInfo']
status = Status(connection, change)
while status.status != 'INSYNC' and time.time() < timeout_time:
time.sleep(WAIT_RETRY_SLEEP)
status.update()
if time.time() >= timeout_time:
raise TimeoutError()
return result
# Shamelessly copied over from https://git.io/vgmDG
IGNORE_CODE = 'Throttling'
MAX_RETRIES=5
def invoke_with_throttling_retries(function_ref, *argv):
retries=0
while True:
try:
retval=function_ref(*argv)
return retval
except boto.exception.BotoServerError as e:
if e.code != IGNORE_CODE or retries==MAX_RETRIES:
raise e
time.sleep(5 * (2**retries))
retries += 1
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['get', 'create', 'delete'], required=True),
zone = dict(required=True),
hosted_zone_id = dict(required=False, default=None),
record = dict(required=True),
ttl = dict(required=False, type='int', default=3600),
type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS', 'SOA'], required=True),
alias = dict(required=False, type='bool'),
alias_hosted_zone_id = dict(required=False),
alias_evaluate_target_health = dict(required=False, type='bool', default=False),
value = dict(required=False),
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500),
private_zone = dict(required=False, type='bool', default=False),
identifier = dict(required=False, default=None),
weight = dict(required=False, type='int'),
region = dict(required=False),
health_check = dict(required=False),
failover = dict(required=False,choices=['PRIMARY','SECONDARY']),
vpc_id = dict(required=False),
wait = dict(required=False, type='bool', default=False),
wait_timeout = dict(required=False, type='int', default=300),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
if distutils.version.StrictVersion(boto.__version__) < distutils.version.StrictVersion(MINIMUM_BOTO_VERSION):
module.fail_json(msg='Found boto in version %s, but >= %s is required' % (boto.__version__, MINIMUM_BOTO_VERSION))
command_in = module.params.get('command')
zone_in = module.params.get('zone').lower()
hosted_zone_id_in = module.params.get('hosted_zone_id')
ttl_in = module.params.get('ttl')
record_in = module.params.get('record').lower()
type_in = module.params.get('type')
value_in = module.params.get('value')
alias_in = module.params.get('alias')
alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
retry_interval_in = module.params.get('retry_interval')
private_zone_in = module.params.get('private_zone')
identifier_in = module.params.get('identifier')
weight_in = module.params.get('weight')
region_in = module.params.get('region')
health_check_in = module.params.get('health_check')
failover_in = module.params.get('failover')
vpc_id_in = module.params.get('vpc_id')
wait_in = module.params.get('wait')
wait_timeout_in = module.params.get('wait_timeout')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
value_list = ()
if type(value_in) is str:
if value_in:
value_list = sorted([s.strip() for s in value_in.split(',')])
elif type(value_in) is list:
value_list = sorted(value_in)
if zone_in[-1:] != '.':
zone_in += "."
if record_in[-1:] != '.':
record_in += "."
if command_in == 'create' or command_in == 'delete':
if not value_in:
module.fail_json(msg = "parameter 'value' required for create/delete")
elif alias_in:
if len(value_list) != 1:
module.fail_json(msg = "parameter 'value' must contain a single dns name for alias create/delete")
elif not alias_hosted_zone_id_in:
module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete")
elif ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None:
module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier")
if command_in == 'create':
if ( weight_in!=None or region_in!=None or failover_in!=None ) and identifier_in==None:
module.fail_json(msg= "If you specify failover, region or weight you must also specify identifier")
elif ( weight_in==None and region_in==None and failover_in==None ) and identifier_in!=None:
module.fail_json(msg= "You have specified identifier which makes sense only if you specify one of: weight, region or failover.")
if vpc_id_in and not private_zone_in:
module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter"
" 'vpc_id'")
# connect to the route53 endpoint
try:
conn = Route53Connection(**aws_connect_kwargs)
except boto.exception.BotoServerError as e:
module.fail_json(msg = e.error_message)
# Find the named zone ID
zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in)
# Verify that the requested zone is already defined in Route53
if zone is None:
errmsg = "Zone %s does not exist in Route53" % zone_in
module.fail_json(msg = errmsg)
record = {}
found_record = False
wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
identifier=identifier_in, weight=weight_in, region=region_in,
health_check=health_check_in, failover=failover_in)
for v in value_list:
if alias_in:
wanted_rset.set_alias(alias_hosted_zone_id_in, v, alias_evaluate_target_health_in)
else:
wanted_rset.add_value(v)
sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in)
for rset in sets:
# Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
# tripping of things like * and @.
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
# Need to save these changes in rset, because rset.to_xml() is compared with wanted_rset.to_xml() in the next block
rset.name = decoded_name
if identifier_in is not None:
identifier_in = str(identifier_in)
if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
record['record'] = decoded_name
record['ttl'] = rset.ttl
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if hosted_zone_id_in:
record['hosted_zone_id'] = hosted_zone_id_in
record['identifier'] = rset.identifier
record['weight'] = rset.weight
record['region'] = rset.region
record['failover'] = rset.failover
record['health_check'] = rset.health_check
if hosted_zone_id_in:
record['hosted_zone_id'] = hosted_zone_id_in
if rset.alias_dns_name:
record['alias'] = True
record['value'] = rset.alias_dns_name
record['values'] = [rset.alias_dns_name]
record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
record['alias_evaluate_target_health'] = rset.alias_evaluate_target_health
else:
record['alias'] = False
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
module.exit_json(changed=False)
break
if command_in == 'get':
if type_in == 'NS':
ns = record['values']
else:
# Retrieve the name servers associated with the zone.
ns = conn.get_zone(zone_in).get_nameservers()
module.exit_json(changed=False, set=record, nameservers=ns)
if command_in == 'delete' and not found_record:
module.exit_json(changed=False)
changes = ResourceRecordSets(conn, zone.id)
if command_in == 'create' or command_in == 'delete':
if command_in == 'create' and found_record:
if not module.params['overwrite']:
module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
command = 'UPSERT'
else:
command = command_in.upper()
changes.add_change_record(command, wanted_rset)
try:
result = invoke_with_throttling_retries(commit, changes, retry_interval_in, wait_in, wait_timeout_in)
except boto.route53.exception.DNSServerError as e:
txt = e.body.split("<Message>")[1]
txt = txt.split("</Message>")[0]
if "but it already exists" in txt:
module.exit_json(changed=False)
else:
module.fail_json(msg = txt)
except TimeoutError:
module.fail_json(msg='Timeout waiting for changes to replicate')
module.exit_json(changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
passalis/sef | sef_dr/classification.py | 1 | 1360 | # License: MIT License https://github.com/passalis/sef/blob/master/LICENSE.txt
from __future__ import absolute_import, division, print_function, unicode_literals
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import NearestCentroid
from sklearn.preprocessing import MinMaxScaler
def evaluate_svm(train_data, train_labels, test_data, test_labels, n_jobs=-1):
"""
Evaluates a representation using a Linear SVM
It uses 3-fold cross validation for selecting the C parameter
:param train_data: training feature matrix
:param train_labels: training labels
:param test_data: test feature matrix
:param test_labels: test labels
:param n_jobs: number of parallel jobs used for the grid search (-1 uses all cores)
:return: the test accuracy
"""
# Scale data to 0-1
scaler = MinMaxScaler()
train_data = scaler.fit_transform(train_data)
test_data = scaler.transform(test_data)
parameters = {'kernel': ['linear'], 'C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000]}
model = svm.SVC(max_iter=10000)
clf = GridSearchCV(model, parameters, n_jobs=n_jobs, cv=3)
clf.fit(train_data, train_labels)
lin_svm_test = clf.score(test_data, test_labels)
return lin_svm_test
def evaluate_ncc(train_data, train_labels, test_data, test_labels):
ncc = NearestCentroid()
ncc.fit(train_data, train_labels)
ncc_test = ncc.score(test_data, test_labels)
return ncc_test
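# --- Editor's note: a hedged usage sketch, not part of the original module.
# scikit-learn's bundled digits dataset stands in for real data, and the split
# parameters are arbitrary.
if __name__ == '__main__':
    from sklearn.datasets import load_digits
    from sklearn.model_selection import train_test_split

    X, y = load_digits(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    print('Linear SVM accuracy:', evaluate_svm(X_train, y_train, X_test, y_test, n_jobs=1))
    print('Nearest centroid accuracy:', evaluate_ncc(X_train, y_train, X_test, y_test))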
| mit |
repotvsupertuga/tvsupertuga.repository | script.module.future/libs/past/translation/__init__.py | 61 | 18459 | # -*- coding: utf-8 -*-
"""
past.translation
==================
The ``past.translation`` package provides an import hook for Python 3 which
transparently runs ``futurize`` fixers over Python 2 code on import to convert
print statements into functions, etc.
It is intended to assist users in migrating to Python 3.x even if some
dependencies still only support Python 2.x.
Usage
-----
Once your Py2 package is installed in the usual module search path, the import
hook is invoked as follows:
>>> from past import autotranslate
>>> autotranslate('mypackagename')
Or:
>>> autotranslate(['mypackage1', 'mypackage2'])
You can unregister the hook using::
>>> from past.translation import remove_hooks
>>> remove_hooks()
Author: Ed Schofield.
Inspired by and based on ``uprefix`` by Vinay M. Sajip.
"""
import imp
import logging
import marshal
import os
import sys
import copy
from lib2to3.pgen2.parse import ParseError
from lib2to3.refactor import RefactoringTool
from libfuturize import fixes
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
myfixes = (list(fixes.libfuturize_fix_names_stage1) +
list(fixes.lib2to3_fix_names_stage1) +
list(fixes.libfuturize_fix_names_stage2) +
list(fixes.lib2to3_fix_names_stage2))
# We detect whether the code is Py2 or Py3 by applying certain lib2to3 fixers
# to it. If the diff is empty, it's Python 3 code.
py2_detect_fixers = [
# From stage 1:
'lib2to3.fixes.fix_apply',
# 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc. and move to stage2
'lib2to3.fixes.fix_except',
'lib2to3.fixes.fix_execfile',
'lib2to3.fixes.fix_exitfunc',
'lib2to3.fixes.fix_funcattrs',
'lib2to3.fixes.fix_filter',
'lib2to3.fixes.fix_has_key',
'lib2to3.fixes.fix_idioms',
'lib2to3.fixes.fix_import', # makes any implicit relative imports explicit. (Use with ``from __future__ import absolute_import)
'lib2to3.fixes.fix_intern',
'lib2to3.fixes.fix_isinstance',
'lib2to3.fixes.fix_methodattrs',
'lib2to3.fixes.fix_ne',
'lib2to3.fixes.fix_numliterals', # turns 1L into 1, 0755 into 0o755
'lib2to3.fixes.fix_paren',
'lib2to3.fixes.fix_print',
'lib2to3.fixes.fix_raise', # uses incompatible with_traceback() method on exceptions
'lib2to3.fixes.fix_renames',
'lib2to3.fixes.fix_reduce',
# 'lib2to3.fixes.fix_set_literal', # this is unnecessary and breaks Py2.6 support
'lib2to3.fixes.fix_repr',
'lib2to3.fixes.fix_standarderror',
'lib2to3.fixes.fix_sys_exc',
'lib2to3.fixes.fix_throw',
'lib2to3.fixes.fix_tuple_params',
'lib2to3.fixes.fix_types',
'lib2to3.fixes.fix_ws_comma',
'lib2to3.fixes.fix_xreadlines',
# From stage 2:
'lib2to3.fixes.fix_basestring',
# 'lib2to3.fixes.fix_buffer', # perhaps not safe. Test this.
# 'lib2to3.fixes.fix_callable', # not needed in Py3.2+
# 'lib2to3.fixes.fix_dict', # TODO: add support for utils.viewitems() etc.
'lib2to3.fixes.fix_exec',
# 'lib2to3.fixes.fix_future', # we don't want to remove __future__ imports
'lib2to3.fixes.fix_getcwdu',
# 'lib2to3.fixes.fix_imports', # called by libfuturize.fixes.fix_future_standard_library
# 'lib2to3.fixes.fix_imports2', # we don't handle this yet (dbm)
# 'lib2to3.fixes.fix_input',
# 'lib2to3.fixes.fix_itertools',
# 'lib2to3.fixes.fix_itertools_imports',
'lib2to3.fixes.fix_long',
# 'lib2to3.fixes.fix_map',
# 'lib2to3.fixes.fix_metaclass', # causes SyntaxError in Py2! Use the one from ``six`` instead
'lib2to3.fixes.fix_next',
'lib2to3.fixes.fix_nonzero', # TODO: add a decorator for mapping __bool__ to __nonzero__
# 'lib2to3.fixes.fix_operator', # we will need support for this by e.g. extending the Py2 operator module to provide those functions in Py3
'lib2to3.fixes.fix_raw_input',
# 'lib2to3.fixes.fix_unicode', # strips off the u'' prefix, which removes a potentially helpful source of information for disambiguating unicode/byte strings
# 'lib2to3.fixes.fix_urllib',
'lib2to3.fixes.fix_xrange',
# 'lib2to3.fixes.fix_zip',
]
class RTs:
"""
A namespace for the refactoring tools. This avoids creating these at
the module level, which slows down the module import. (See issue #117).
There are two possible grammars: with or without the print statement.
Hence we have two possible refactoring tool implementations.
"""
_rt = None
_rtp = None
_rt_py2_detect = None
_rtp_py2_detect = None
@staticmethod
def setup():
"""
Call this before using the refactoring tools to create them on demand
if needed.
"""
if None in [RTs._rt, RTs._rtp]:
RTs._rt = RefactoringTool(myfixes)
RTs._rtp = RefactoringTool(myfixes, {'print_function': True})
@staticmethod
def setup_detect_python2():
"""
Call this before using the refactoring tools to create them on demand
if needed.
"""
if None in [RTs._rt_py2_detect, RTs._rtp_py2_detect]:
RTs._rt_py2_detect = RefactoringTool(py2_detect_fixers)
RTs._rtp_py2_detect = RefactoringTool(py2_detect_fixers,
{'print_function': True})
# We need to find a prefix for the standard library, as we don't want to
# process any files there (they will already be Python 3).
#
# The following method is used by Sanjay Vinip in uprefix. This fails for
# ``conda`` environments:
# # In a non-pythonv virtualenv, sys.real_prefix points to the installed Python.
# # In a pythonv venv, sys.base_prefix points to the installed Python.
# # Outside a virtual environment, sys.prefix points to the installed Python.
# if hasattr(sys, 'real_prefix'):
# _syslibprefix = sys.real_prefix
# else:
# _syslibprefix = getattr(sys, 'base_prefix', sys.prefix)
# Instead, we use the portion of the path common to both the stdlib modules
# ``math`` and ``urllib``.
def splitall(path):
"""
Split a path into all components. From Python Cookbook.
"""
allparts = []
while True:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
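# For example (illustrative only):
#   splitall('/usr/lib/python2.7')  ->  ['/', 'usr', 'lib', 'python2.7']
#   splitall('foo/bar/baz.py')      ->  ['foo', 'bar', 'baz.py']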
def common_substring(s1, s2):
"""
Returns the longest common substring to the two strings, starting from the
left.
"""
chunks = []
path1 = splitall(s1)
path2 = splitall(s2)
for (dir1, dir2) in zip(path1, path2):
if dir1 != dir2:
break
chunks.append(dir1)
return os.path.join(*chunks)
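# For example (illustrative only):
#   common_substring('/usr/lib/python2.7/math.py', '/usr/lib/python2.7/urllib.py')
#   returns '/usr/lib/python2.7'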
# _stdlibprefix = common_substring(math.__file__, urllib.__file__)
def detect_python2(source, pathname):
"""
Returns a bool indicating whether we think the code is Py2
"""
RTs.setup_detect_python2()
try:
tree = RTs._rt_py2_detect.refactor_string(source, pathname)
except ParseError as e:
if e.msg != 'bad input' or e.value != '=':
raise
tree = RTs._rtp.refactor_string(source, pathname)
if source != str(tree)[:-1]: # remove added newline
# The above fixers made changes, so we conclude it's Python 2 code
logger.debug('Detected Python 2 code: {0}'.format(pathname))
with open('/tmp/original_code.py', 'w') as f:
f.write('### Original code (detected as py2): %s\n%s' %
(pathname, source))
with open('/tmp/py2_detection_code.py', 'w') as f:
f.write('### Code after running py3 detection (from %s)\n%s' %
(pathname, str(tree)[:-1]))
return True
else:
logger.debug('Detected Python 3 code: {0}'.format(pathname))
with open('/tmp/original_code.py', 'w') as f:
f.write('### Original code (detected as py3): %s\n%s' %
(pathname, source))
try:
os.remove('/tmp/futurize_code.py')
except OSError:
pass
return False
class Py2Fixer(object):
"""
An import hook class that uses lib2to3 for source-to-source translation of
Py2 code to Py3.
"""
# See the comments on :class:future.standard_library.RenameImport.
# We add this attribute here so remove_hooks() and install_hooks() can
# unambiguously detect whether the import hook is installed:
PY2FIXER = True
def __init__(self):
self.found = None
self.base_exclude_paths = ['future', 'past']
self.exclude_paths = copy.copy(self.base_exclude_paths)
self.include_paths = []
def include(self, paths):
"""
Pass in a sequence of module names such as 'plotrique.plotting' that,
if present at the leftmost side of the full package name, would
specify the module to be transformed from Py2 to Py3.
"""
self.include_paths += paths
def exclude(self, paths):
"""
Pass in a sequence of strings such as 'mymodule' that, if
present at the leftmost side of the full package name, would cause
the module not to undergo any source transformation.
"""
self.exclude_paths += paths
def find_module(self, fullname, path=None):
logger.debug('Running find_module: {0}...'.format(fullname))
if '.' in fullname:
parent, child = fullname.rsplit('.', 1)
if path is None:
loader = self.find_module(parent, path)
mod = loader.load_module(parent)
path = mod.__path__
fullname = child
# Perhaps we should try using the new importlib functionality in Python
# 3.3: something like this?
# thing = importlib.machinery.PathFinder.find_module(fullname, path)
try:
self.found = imp.find_module(fullname, path)
except Exception as e:
            logger.debug('Py2Fixer could not find {0}'.format(fullname))
            logger.debug('Exception was: {0}'.format(e))
return None
self.kind = self.found[-1][-1]
if self.kind == imp.PKG_DIRECTORY:
self.pathname = os.path.join(self.found[1], '__init__.py')
elif self.kind == imp.PY_SOURCE:
self.pathname = self.found[1]
return self
def transform(self, source):
# This implementation uses lib2to3,
# you can override and use something else
# if that's better for you
# lib2to3 likes a newline at the end
RTs.setup()
source += '\n'
try:
tree = RTs._rt.refactor_string(source, self.pathname)
except ParseError as e:
if e.msg != 'bad input' or e.value != '=':
raise
tree = RTs._rtp.refactor_string(source, self.pathname)
# could optimise a bit for only doing str(tree) if
# getattr(tree, 'was_changed', False) returns True
return str(tree)[:-1] # remove added newline
def load_module(self, fullname):
logger.debug('Running load_module for {0}...'.format(fullname))
if fullname in sys.modules:
mod = sys.modules[fullname]
else:
if self.kind in (imp.PY_COMPILED, imp.C_EXTENSION, imp.C_BUILTIN,
imp.PY_FROZEN):
convert = False
# elif (self.pathname.startswith(_stdlibprefix)
# and 'site-packages' not in self.pathname):
# # We assume it's a stdlib package in this case. Is this too brittle?
# # Please file a bug report at https://github.com/PythonCharmers/python-future
# # if so.
# convert = False
# in theory, other paths could be configured to be excluded here too
elif any([fullname.startswith(path) for path in self.exclude_paths]):
convert = False
elif any([fullname.startswith(path) for path in self.include_paths]):
convert = True
else:
convert = False
if not convert:
logger.debug('Excluded {0} from translation'.format(fullname))
mod = imp.load_module(fullname, *self.found)
else:
logger.debug('Autoconverting {0} ...'.format(fullname))
mod = imp.new_module(fullname)
sys.modules[fullname] = mod
# required by PEP 302
mod.__file__ = self.pathname
mod.__name__ = fullname
mod.__loader__ = self
# This:
# mod.__package__ = '.'.join(fullname.split('.')[:-1])
# seems to result in "SystemError: Parent module '' not loaded,
# cannot perform relative import" for a package's __init__.py
# file. We use the approach below. Another option to try is the
# minimal load_module pattern from the PEP 302 text instead.
# Is the test in the next line more or less robust than the
# following one? Presumably less ...
# ispkg = self.pathname.endswith('__init__.py')
if self.kind == imp.PKG_DIRECTORY:
mod.__path__ = [ os.path.dirname(self.pathname) ]
mod.__package__ = fullname
else:
#else, regular module
mod.__path__ = []
mod.__package__ = fullname.rpartition('.')[0]
try:
cachename = imp.cache_from_source(self.pathname)
if not os.path.exists(cachename):
update_cache = True
else:
sourcetime = os.stat(self.pathname).st_mtime
cachetime = os.stat(cachename).st_mtime
update_cache = cachetime < sourcetime
# # Force update_cache to work around a problem with it being treated as Py3 code???
# update_cache = True
if not update_cache:
with open(cachename, 'rb') as f:
data = f.read()
try:
code = marshal.loads(data)
except Exception:
# pyc could be corrupt. Regenerate it
update_cache = True
if update_cache:
if self.found[0]:
source = self.found[0].read()
elif self.kind == imp.PKG_DIRECTORY:
with open(self.pathname) as f:
source = f.read()
if detect_python2(source, self.pathname):
source = self.transform(source)
with open('/tmp/futurized_code.py', 'w') as f:
f.write('### Futurized code (from %s)\n%s' %
(self.pathname, source))
code = compile(source, self.pathname, 'exec')
dirname = os.path.dirname(cachename)
if not os.path.exists(dirname):
os.makedirs(dirname)
try:
with open(cachename, 'wb') as f:
data = marshal.dumps(code)
f.write(data)
except Exception: # could be write-protected
pass
exec(code, mod.__dict__)
except Exception as e:
# must remove module from sys.modules
del sys.modules[fullname]
raise # keep it simple
if self.found[0]:
self.found[0].close()
return mod
_hook = Py2Fixer()
def install_hooks(include_paths=(), exclude_paths=()):
if isinstance(include_paths, str):
include_paths = (include_paths,)
if isinstance(exclude_paths, str):
exclude_paths = (exclude_paths,)
assert len(include_paths) + len(exclude_paths) > 0, 'Pass at least one argument'
_hook.include(include_paths)
_hook.exclude(exclude_paths)
# _hook.debug = debug
enable = sys.version_info[0] >= 3 # enabled for all 3.x
if enable and _hook not in sys.meta_path:
sys.meta_path.insert(0, _hook) # insert at beginning. This could be made a parameter
# We could return the hook when there are ways of configuring it
#return _hook
def remove_hooks():
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def detect_hooks():
"""
Returns True if the import hooks are installed, False if not.
"""
return _hook in sys.meta_path
# present = any([hasattr(hook, 'PY2FIXER') for hook in sys.meta_path])
# return present
class hooks(object):
"""
Acts as a context manager. Use like this:
>>> from past import translation
>>> with translation.hooks():
... import mypy2module
>>> import requests # py2/3 compatible anyway
>>> # etc.
"""
def __enter__(self):
self.hooks_were_installed = detect_hooks()
install_hooks()
return self
def __exit__(self, *args):
if not self.hooks_were_installed:
remove_hooks()
class suspend_hooks(object):
"""
Acts as a context manager. Use like this:
>>> from past import translation
>>> translation.install_hooks()
>>> import http.client
>>> # ...
>>> with translation.suspend_hooks():
>>> import requests # or others that support Py2/3
If the hooks were disabled before the context, they are not installed when
the context is left.
"""
def __enter__(self):
self.hooks_were_installed = detect_hooks()
remove_hooks()
return self
def __exit__(self, *args):
if self.hooks_were_installed:
install_hooks()
| gpl-2.0 |
bo01ean/Stino | stino/pyarduino/arduino_target_board.py | 14 | 4683 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# 1. Copyright
# 2. License
# 3. Author
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from . import base
class TargetBoardInfo(object):
def __init__(self, root_dirs):
self.target_board = None
self.target_sub_boards = []
self.settings = base.settings.get_arduino_settings()
self.update(root_dirs)
def update(self, root_dirs):
self.root_dirs = root_dirs
self.check_target_board()
def check_target_board(self):
boards = load_boards(self.root_dirs)
if boards:
board_ids = [board.get_id() for board in boards]
target_board_id = self.settings.get('target_board_id', '')
if not target_board_id in board_ids:
target_board_id = board_ids[0]
self.settings.set('target_board_id', target_board_id)
index = board_ids.index(target_board_id)
self.target_board = boards[index]
self.target_sub_boards = []
if self.target_board.has_options():
self.check_target_sub_boards()
def check_target_sub_boards(self):
self.target_sub_boards = []
if self.target_board and self.target_board.has_options():
target_board_id = self.target_board.get_id()
board_options = self.target_board.get_options()
for option_index, option in enumerate(board_options):
target_sub_board_info = TargetSubBoardInfo(self.target_board,
option_index)
target_sub_board = target_sub_board_info.get_target_sub_board()
self.target_sub_boards.append(target_sub_board)
target_sub_board_ids = [
sb.get_id() for sb in self.target_sub_boards]
self.settings.set(target_board_id, target_sub_board_ids)
def change_target_board(self, board_id):
self.settings.set('target_board_id', board_id)
self.check_target_board()
def change_target_sub_board(self, option_index, sub_board_id):
if self.target_board and self.target_board.has_options():
target_board_id = self.target_board.get_id()
target_sub_board_ids = self.settings.get(target_board_id)
target_sub_board_ids[option_index] = sub_board_id
self.settings.set(target_board_id, target_sub_board_ids)
self.check_target_sub_boards()
def get_target_board(self):
return self.target_board
def get_target_sub_boards(self):
return self.target_sub_boards
def get_target_arch(self):
target_board_id = self.target_board.get_id()
ids = target_board_id.split('.')
target_arch = ids[-2]
return target_arch
def get_params(self):
params = {}
if self.target_board:
params.update(self.target_board.get_params())
for target_sub_board in self.target_sub_boards:
params.update(target_sub_board.get_params())
return params
class TargetSubBoardInfo(object):
def __init__(self, target_board, option_index):
self.target_sub_board = None
self.target_board = target_board
self.option_index = option_index
self.settings = base.settings.get_arduino_settings()
self.check_target_sub_board()
def check_target_sub_board(self):
if self.target_board:
target_board_id = self.target_board.get_id()
target_sub_board_ids = self.settings.get(target_board_id, [])
if self.option_index < len(target_sub_board_ids):
target_sub_board_id = target_sub_board_ids[self.option_index]
options = self.target_board.get_options()
option = options[self.option_index]
self.target_sub_board = option.get_item(target_sub_board_id)
if not self.target_sub_board:
options = self.target_board.get_options()
option = options[self.option_index]
sub_boards = option.get_items()
self.target_sub_board = sub_boards[0]
def get_target_sub_board(self):
return self.target_sub_board
def get_params(self):
return self.target_sub_board.get_params()
def load_boards(root_dirs):
boards = []
for root_dir in root_dirs:
for package in root_dir.get_packages():
for platform in package.get_platforms():
boards += platform.get_boards()
return boards
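# Typical usage sketch (names below are illustrative; in Stino the root_dirs
# argument comes from the plugin's settings/resource layer, not from this module):
#     target_info = TargetBoardInfo(root_dirs)
#     board = target_info.get_target_board()
#     build_params = target_info.get_params()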
| mit |
robdennis/sideboard | tests/plugins/different_versions/rdflib3_0_0/env/lib/python2.7/site-packages/rdflib/plugins/parsers/ntriples.py | 7 | 6700 | #!/usr/bin/env python
__doc__="""
N-Triples Parser
License: GPL 2, W3C, BSD, or MIT
Author: Sean B. Palmer, inamidst.com
Documentation: http://inamidst.com/proj/rdf/ntriples-doc
Command line usage::
./ntriples.py <URI> - parses URI as N-Triples
./ntriples.py --help - prints out this help message
# @@ fully empty document?
"""
import re
uriref = r'<([^:]+:[^\s"<>]+)>'
literal = r'"([^"\\]*(?:\\.[^"\\]*)*)"'
litinfo = r'(?:@([a-z]+(?:-[a-z0-9]+)*)|\^\^' + uriref + r')?'
r_line = re.compile(r'([^\r\n]*)(?:\r\n|\r|\n)')
r_wspace = re.compile(r'[ \t]*')
r_wspaces = re.compile(r'[ \t]+')
r_tail = re.compile(r'[ \t]*\.[ \t]*')
r_uriref = re.compile(uriref)
r_nodeid = re.compile(r'_:([A-Za-z][A-Za-z0-9]*)')
r_literal = re.compile(literal + litinfo)
bufsiz = 2048
validate = False
class Node(unicode): pass
# class URI(Node): pass
# class bNode(Node): pass
# class Literal(Node):
# def __new__(cls, lit, lang=None, dtype=None):
# n = str(lang) + ' ' + str(dtype) + ' ' + lit
# return unicode.__new__(cls, n)
from rdflib.term import URIRef as URI
from rdflib.term import BNode as bNode
from rdflib.term import Literal
class Sink(object):
def __init__(self):
self.length = 0
def triple(self, s, p, o):
self.length += 1
print (s, p, o)
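# Any object exposing a triple(s, p, o) method can serve as a sink; a minimal
# collecting sink might look like this (illustrative sketch only):
# class ListSink(object):
#     def __init__(self):
#         self.length = 0
#         self.triples = []
#     def triple(self, s, p, o):
#         self.length += 1
#         self.triples.append((s, p, o))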
class ParseError(Exception): pass
quot = {'t': '\t', 'n': '\n', 'r': '\r', '"': '"', '\\': '\\'}
r_safe = re.compile(r'([\x20\x21\x23-\x5B\x5D-\x7E]+)')
r_quot = re.compile(r'\\(t|n|r|"|\\)')
r_uniquot = re.compile(r'\\u([0-9A-F]{4})|\\U([0-9A-F]{8})')
def unquote(s):
"""Unquote an N-Triples string."""
result = []
while s:
m = r_safe.match(s)
if m:
s = s[m.end():]
result.append(m.group(1))
continue
m = r_quot.match(s)
if m:
s = s[2:]
result.append(quot[m.group(1)])
continue
m = r_uniquot.match(s)
if m:
s = s[m.end():]
u, U = m.groups()
codepoint = int(u or U, 16)
if codepoint > 0x10FFFF:
raise ParseError("Disallowed codepoint: %08X" % codepoint)
result.append(unichr(codepoint))
elif s.startswith('\\'):
raise ParseError("Illegal escape at: %s..." % s[:10])
else: raise ParseError("Illegal literal character: %r" % s[0])
return unicode(''.join(result))
if not validate:
def unquote(s):
return s.decode('unicode-escape')
r_hibyte = re.compile(r'([\x80-\xFF])')
def uriquote(uri):
return r_hibyte.sub(lambda m: '%%%02X' % ord(m.group(1)), uri)
if not validate:
def uriquote(uri):
return uri
class NTriplesParser(object):
"""An N-Triples Parser.
Usage::
p = NTriplesParser(sink=MySink())
sink = p.parse(f) # file; use parsestring for a string
"""
def __init__(self, sink=None):
if sink is not None:
self.sink = sink
else: self.sink = Sink()
def parse(self, f):
"""Parse f as an N-Triples file."""
if not hasattr(f, 'read'):
raise ParseError("Item to parse must be a file-like object.")
self.file = f
self.buffer = ''
while True:
self.line = self.readline()
if self.line is None: break
try: self.parseline()
except ParseError:
raise ParseError("Invalid line: %r" % self.line)
return self.sink
def parsestring(self, s):
"""Parse s as an N-Triples string."""
if not isinstance(s, basestring):
raise ParseError("Item to parse must be a string instance.")
from cStringIO import StringIO
f = StringIO()
f.write(s)
f.seek(0)
self.parse(f)
def readline(self):
"""Read an N-Triples line from buffered input."""
# N-Triples lines end in either CRLF, CR, or LF
# Therefore, we can't just use f.readline()
if not self.buffer:
buffer = self.file.read(bufsiz)
if not buffer: return None
self.buffer = buffer
while True:
m = r_line.match(self.buffer)
if m: # the more likely prospect
self.buffer = self.buffer[m.end():]
return m.group(1)
else:
buffer = self.file.read(bufsiz)
if not buffer and not self.buffer.isspace():
raise ParseError("EOF in line")
elif not buffer:
return None
self.buffer += buffer
def parseline(self):
self.eat(r_wspace)
if (not self.line) or self.line.startswith('#'):
return # The line is empty or a comment
subject = self.subject()
self.eat(r_wspaces)
predicate = self.predicate()
self.eat(r_wspaces)
object = self.object()
self.eat(r_tail)
if self.line:
raise ParseError("Trailing garbage")
self.sink.triple(subject, predicate, object)
def peek(self, token):
return self.line.startswith(token)
def eat(self, pattern):
m = pattern.match(self.line)
if not m: # @@ Why can't we get the original pattern?
raise ParseError("Failed to eat %s" % pattern)
self.line = self.line[m.end():]
return m
def subject(self):
# @@ Consider using dictionary cases
subj = self.uriref() or self.nodeid()
if not subj:
raise ParseError("Subject must be uriref or nodeID")
return subj
def predicate(self):
pred = self.uriref()
if not pred:
raise ParseError("Predicate must be uriref")
return pred
def object(self):
objt = self.uriref() or self.nodeid() or self.literal()
if objt is False:
raise ParseError("Unrecognised object type")
return objt
def uriref(self):
if self.peek('<'):
uri = self.eat(r_uriref).group(1)
uri = unquote(uri)
uri = uriquote(uri)
return URI(uri)
return False
def nodeid(self):
if self.peek('_'):
return bNode(self.eat(r_nodeid).group(1))
return False
def literal(self):
if self.peek('"'):
lit, lang, dtype = self.eat(r_literal).groups()
lang = lang or None
dtype = dtype or None
if lang and dtype:
raise ParseError("Can't have both a language and a datatype")
lit = unquote(lit)
return Literal(lit, lang, dtype)
return False
def parseURI(uri):
import urllib
parser = NTriplesParser()
u = urllib.urlopen(uri)
sink = parser.parse(u)
u.close()
# for triple in sink:
# print triple
print 'Length of input:', sink.length
def main():
import sys
if len(sys.argv) == 2:
parseURI(sys.argv[1])
else: print __doc__
if __name__=="__main__":
main()
| bsd-3-clause |
M4sse/chromium.src | tools/memory_inspector/memory_inspector/core/native_heap_unittest.py | 89 | 6402 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The test scenario is as follows:
VA space: |0 |4k |8k |12k |16k |20k ... |64k |65k |66k
Mmaps: [ anon 1 ][anon 2] [anon 3] ... [ exe1 ][ exe2 ]
Resident: *******-------*******-------******* (*:resident, -:not resident)
Allocs: <1> <2> < 3 >
| | |
S.Traces: | | +-----------> st1[exe1 + 0, exe1 + 4]
| +--------------------------> st1[exe1 + 0, exe1 + 4]
+------------------------------> st2[exe1 + 0, exe2 + 4, post-exe2]
Furthermore, the exe2 is a file mapping with non-zero (8k) offset.
"""
import unittest
from memory_inspector.core import memory_map
from memory_inspector.core import native_heap
from memory_inspector.core import stacktrace
from memory_inspector.core import symbol
from memory_inspector.core.memory_map import PAGE_SIZE
class NativeHeapTest(unittest.TestCase):
def runTest(self):
nheap = native_heap.NativeHeap()
EXE_1_MM_BASE = 64 * PAGE_SIZE
EXE_2_MM_BASE = 65 * PAGE_SIZE
EXE_2_FILE_OFF = 8192
st1 = stacktrace.Stacktrace()
st1.Add(nheap.GetStackFrame(EXE_1_MM_BASE))
st1.Add(nheap.GetStackFrame(EXE_1_MM_BASE + 4))
st2 = stacktrace.Stacktrace()
st2.Add(nheap.GetStackFrame(EXE_1_MM_BASE))
st2.Add(nheap.GetStackFrame(EXE_2_MM_BASE + 4))
st2.Add(nheap.GetStackFrame(EXE_2_MM_BASE + PAGE_SIZE + 4))
# Check that GetStackFrames keeps one unique object instance per address.
# This is to guarantee that the symbolization logic (SymbolizeUsingSymbolDB)
# can cheaply iterate on distinct stack frames rather than re-processing
# every stack frame for each allocation (and save memory as well).
self.assertIs(st1[0], st2[0])
self.assertIsNot(st1[0], st1[1])
self.assertIsNot(st2[0], st2[1])
alloc1 = native_heap.Allocation(start=4, size=4, stack_trace=st1)
alloc2 = native_heap.Allocation(start=4090, size=8, stack_trace=st1)
alloc3 = native_heap.Allocation(start=8190, size=10000, stack_trace=st2)
nheap.Add(alloc1)
nheap.Add(alloc2)
nheap.Add(alloc3)
self.assertEqual(len(nheap.allocations), 3)
self.assertIn(alloc1, nheap.allocations)
self.assertIn(alloc2, nheap.allocations)
self.assertIn(alloc3, nheap.allocations)
############################################################################
# Test the relativization (absolute address -> mmap + offset) logic.
############################################################################
    mmap = memory_map.Map()
mmap.Add(memory_map.MapEntry(EXE_1_MM_BASE, EXE_1_MM_BASE + PAGE_SIZE - 1,
'rw--', '/d/exe1', 0))
mmap.Add(memory_map.MapEntry(EXE_2_MM_BASE, EXE_2_MM_BASE + PAGE_SIZE - 1,
'rw--', 'exe2',EXE_2_FILE_OFF))
# Entry for EXE_3 is deliberately missing to check the fallback behavior.
nheap.RelativizeStackFrames(mmap)
self.assertEqual(st1[0].exec_file_rel_path, '/d/exe1')
self.assertEqual(st1[0].exec_file_name, 'exe1')
self.assertEqual(st1[0].offset, 0)
self.assertEqual(st1[1].exec_file_rel_path, '/d/exe1')
self.assertEqual(st1[1].exec_file_name, 'exe1')
self.assertEqual(st1[1].offset, 4)
self.assertEqual(st2[0].exec_file_rel_path, '/d/exe1')
self.assertEqual(st2[0].exec_file_name, 'exe1')
self.assertEqual(st2[0].offset, 0)
self.assertEqual(st2[1].exec_file_rel_path, 'exe2')
self.assertEqual(st2[1].exec_file_name, 'exe2')
self.assertEqual(st2[1].offset, 4 + EXE_2_FILE_OFF)
self.assertIsNone(st2[2].exec_file_rel_path)
self.assertIsNone(st2[2].exec_file_name)
self.assertIsNone(st2[2].offset)
############################################################################
# Test the symbolization logic.
############################################################################
syms = symbol.Symbols()
syms.Add('/d/exe1', 0, symbol.Symbol('sym1', 'src1.c', 1)) # st1[0]
syms.Add('exe2', 4 + EXE_2_FILE_OFF, symbol.Symbol('sym3')) # st2[1]
nheap.SymbolizeUsingSymbolDB(syms)
self.assertEqual(st1[0].symbol.name, 'sym1')
self.assertEqual(st1[0].symbol.source_info[0].source_file_path, 'src1.c')
self.assertEqual(st1[0].symbol.source_info[0].line_number, 1)
# st1[1] should have no symbol info, because we didn't provide any above.
self.assertIsNone(st1[1].symbol)
    # st2[0] and st1[0] were the same Frame. Expect identical symbol instances.
    self.assertIs(st2[0].symbol, st1[0].symbol)
    # st2[1] should have a symbol name, but no source line info.
self.assertEqual(st2[1].symbol.name, 'sym3')
self.assertEqual(len(st2[1].symbol.source_info), 0)
# st2[2] should have no sym because we didn't even provide a mmap for exe3.
self.assertIsNone(st2[2].symbol)
############################################################################
# Test the resident size calculation logic (intersects mmaps and allocs).
############################################################################
mmap.Add(
memory_map.MapEntry(0, 8191, 'rw--', '', 0, resident_pages=[1]))
mmap.Add(
memory_map.MapEntry(8192, 12287, 'rw--', '', 0, resident_pages=[1]))
# [12k, 16k] is deliberately missing to check the fallback behavior.
mmap.Add(
memory_map.MapEntry(16384, 20479, 'rw--', '', 0, resident_pages=[1]))
nheap.CalculateResidentSize(mmap)
    # alloc1 [4, 8] is fully resident because it lies in the first resident 4k.
    self.assertEqual(alloc1.resident_size, 4)
    # alloc2 [4090, 4098] should have only 6 resident bytes ([4090, 4096]), but
    # not the last two, which fall on the second page, which is not resident.
self.assertEqual(alloc2.resident_size, 6)
# alloc3 [8190, 18190] is split as follows (* = resident):
# [8190, 8192]: these 2 bytes are NOT resident, they lay in the 2nd page.
# *[8192, 12288]: the 3rd page is resident and is fully covered by alloc3.
# [12288, 16384]: the 4th page is fully covered as well, but not resident.
# *[16384, 18190]: the 5th page is partially covered and resident.
self.assertEqual(alloc3.resident_size, (12288 - 8192) + (18190 - 16384))
| bsd-3-clause |
search5/nanumlectures | lib/oauthlib/oauth2/rfc6749/endpoints/resource.py | 98 | 3316 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749
~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for consuming and providing OAuth 2.0 RFC6749.
"""
from __future__ import absolute_import, unicode_literals
import logging
from oauthlib.common import Request
from .base import BaseEndpoint, catch_errors_and_unavailability
log = logging.getLogger(__name__)
class ResourceEndpoint(BaseEndpoint):
"""Authorizes access to protected resources.
The client accesses protected resources by presenting the access
token to the resource server. The resource server MUST validate the
access token and ensure that it has not expired and that its scope
covers the requested resource. The methods used by the resource
server to validate the access token (as well as any error responses)
are beyond the scope of this specification but generally involve an
interaction or coordination between the resource server and the
authorization server::
# For most cases, returning a 403 should suffice.
The method in which the client utilizes the access token to
authenticate with the resource server depends on the type of access
token issued by the authorization server. Typically, it involves
using the HTTP "Authorization" request header field [RFC2617] with an
authentication scheme defined by the specification of the access
token type used, such as [RFC6750]::
# Access tokens may also be provided in query and body
https://example.com/protected?access_token=kjfch2345sdf # Query
access_token=sdf23409df # Body
"""
def __init__(self, default_token, token_types):
BaseEndpoint.__init__(self)
self._tokens = token_types
self._default_token = default_token
@property
def default_token(self):
return self._default_token
@property
def default_token_type_handler(self):
return self.tokens.get(self.default_token)
@property
def tokens(self):
return self._tokens
@catch_errors_and_unavailability
def verify_request(self, uri, http_method='GET', body=None, headers=None,
scopes=None):
"""Validate client, code etc, return body + headers"""
request = Request(uri, http_method, body, headers)
request.token_type = self.find_token_type(request)
request.scopes = scopes
token_type_handler = self.tokens.get(request.token_type,
self.default_token_type_handler)
log.debug('Dispatching token_type %s request to %r.',
request.token_type, token_type_handler)
return token_type_handler.validate_request(request), request
def find_token_type(self, request):
"""Token type identification.
RFC 6749 does not provide a method for easily differentiating between
different token types during protected resource access. We estimate
the most likely token type (if any) by asking each known token type
to give an estimation based on the request.
"""
estimates = sorted(((t.estimate_type(request), n)
for n, t in self.tokens.items()))
return estimates[0][1] if len(estimates) else None
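# Example wiring (sketch; ``MyRequestValidator`` is an assumed application-side
# class, while BearerToken comes from oauthlib.oauth2.rfc6749.tokens):
#     from oauthlib.oauth2.rfc6749.tokens import BearerToken
#     validator = MyRequestValidator()
#     resource_endpoint = ResourceEndpoint(
#         default_token='Bearer',
#         token_types={'Bearer': BearerToken(request_validator=validator)})
#     valid, request = resource_endpoint.verify_request(
#         uri, http_method='GET', body=None, headers=None, scopes=['profile'])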
| apache-2.0 |
Pan0ram1x/bitnodes | crawl.py | 2 | 13691 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# crawl.py - Greenlets-based Bitcoin network crawler.
#
# Copyright (c) Addy Yeow Chin Heng <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Greenlets-based Bitcoin network crawler.
"""
from gevent import monkey
monkey.patch_all()
import gevent
import json
import logging
import os
import redis
import redis.connection
import socket
import sys
import time
from binascii import hexlify
from collections import Counter
from ConfigParser import ConfigParser
from ipaddress import ip_network
from protocol import (ProtocolError, ConnectionError, Connection, SERVICES,
DEFAULT_PORT)
redis.connection.socket = gevent.socket
# Redis connection setup
REDIS_SOCKET = os.environ.get('REDIS_SOCKET', "/tmp/redis.sock")
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
REDIS_CONN = redis.StrictRedis(unix_socket_path=REDIS_SOCKET,
password=REDIS_PASSWORD)
SETTINGS = {}
def enumerate_node(redis_pipe, addr_msgs, now):
"""
Adds all peering nodes with max. age of 24 hours into the crawl set.
"""
peers = 0
for addr_msg in addr_msgs:
if 'addr_list' in addr_msg:
for peer in addr_msg['addr_list']:
age = now - peer['timestamp'] # seconds
# Add peering node with age <= 24 hours into crawl set
if age >= 0 and age <= SETTINGS['max_age']:
address = peer['ipv4'] if peer['ipv4'] else peer['ipv6']
port = peer['port'] if peer['port'] > 0 else DEFAULT_PORT
services = peer['services']
if not address:
continue
if is_excluded(address):
logging.debug("Exclude: %s", address)
continue
redis_pipe.sadd('pending', (address, port, services))
peers += 1
return peers
def connect(redis_conn, key):
"""
Establishes connection with a node to:
1) Send version message
2) Receive version and verack message
3) Send getaddr message
4) Receive addr message containing list of peering nodes
Stores state and height for node in Redis.
"""
handshake_msgs = []
addr_msgs = []
redis_conn.hset(key, 'state', "") # Set Redis hash for a new node
(address, port, services) = key[5:].split("-", 2)
height = redis_conn.get('height')
if height:
height = int(height)
conn = Connection((address, int(port)),
(SETTINGS['source_address'], 0),
socket_timeout=SETTINGS['socket_timeout'],
protocol_version=SETTINGS['protocol_version'],
to_services=int(services),
from_services=SETTINGS['services'],
user_agent=SETTINGS['user_agent'],
height=height,
relay=SETTINGS['relay'])
try:
logging.debug("Connecting to %s", conn.to_addr)
conn.open()
handshake_msgs = conn.handshake()
addr_msgs = conn.getaddr()
except (ProtocolError, ConnectionError, socket.error) as err:
logging.debug("%s: %s", conn.to_addr, err)
finally:
conn.close()
gevent.sleep(0.3)
redis_pipe = redis_conn.pipeline()
if len(handshake_msgs) > 0:
height_key = "height:{}-{}".format(address, port)
redis_pipe.setex(height_key, SETTINGS['max_age'],
handshake_msgs[0].get('height', 0))
now = int(time.time())
peers = enumerate_node(redis_pipe, addr_msgs, now)
logging.debug("%s Peers: %d", conn.to_addr, peers)
redis_pipe.hset(key, 'state', "up")
redis_pipe.execute()
def dump(timestamp, nodes):
"""
Dumps data for reachable nodes into timestamp-prefixed JSON file and
returns most common height from the nodes.
"""
json_data = []
for node in nodes:
(address, port, services) = node[5:].split("-", 2)
try:
height = int(REDIS_CONN.get("height:{}-{}".format(address, port)))
except TypeError:
logging.warning("height:%s-%s missing", address, port)
height = 0
json_data.append([address, int(port), int(services), height])
json_output = os.path.join(SETTINGS['crawl_dir'],
"{}.json".format(timestamp))
open(json_output, 'w').write(json.dumps(json_data))
logging.info("Wrote %s", json_output)
return Counter([node[-1] for node in json_data]).most_common(1)[0][0]
def restart(timestamp):
"""
Dumps data for the reachable nodes into a JSON file.
Loads all reachable nodes from Redis into the crawl set.
Removes keys for all nodes from current crawl.
Updates number of reachable nodes and most common height in Redis.
"""
nodes = [] # Reachable nodes
keys = REDIS_CONN.keys('node:*')
logging.debug("Keys: %d", len(keys))
redis_pipe = REDIS_CONN.pipeline()
for key in keys:
state = REDIS_CONN.hget(key, 'state')
if state == "up":
nodes.append(key)
(address, port, services) = key[5:].split("-", 2)
redis_pipe.sadd('pending', (address, int(port), int(services)))
redis_pipe.delete(key)
# Reachable nodes from https://getaddr.bitnodes.io/#join-the-network
checked_nodes = REDIS_CONN.zrangebyscore(
'check', timestamp - SETTINGS['max_age'], timestamp)
for node in checked_nodes:
(address, port, services) = eval(node)
if is_excluded(address):
logging.debug("Exclude: %s", address)
continue
redis_pipe.sadd('pending', (address, port, services))
redis_pipe.execute()
reachable_nodes = len(nodes)
logging.info("Reachable nodes: %d", reachable_nodes)
REDIS_CONN.lpush('nodes', (timestamp, reachable_nodes))
height = dump(timestamp, nodes)
REDIS_CONN.set('height', height)
logging.info("Height: %d", height)
def cron():
"""
Assigned to a worker to perform the following tasks periodically to
maintain a continuous crawl:
1) Reports the current number of nodes in crawl set
2) Initiates a new crawl once the crawl set is empty
"""
start = int(time.time())
while True:
pending_nodes = REDIS_CONN.scard('pending')
logging.info("Pending: %d", pending_nodes)
if pending_nodes == 0:
REDIS_CONN.set('crawl:master:state', "starting")
now = int(time.time())
elapsed = now - start
REDIS_CONN.set('elapsed', elapsed)
logging.info("Elapsed: %d", elapsed)
logging.info("Restarting")
restart(now)
start = int(time.time())
REDIS_CONN.set('crawl:master:state', "running")
gevent.sleep(SETTINGS['cron_delay'])
def task():
"""
Assigned to a worker to retrieve (pop) a node from the crawl set and
attempt to establish connection with a new node.
"""
redis_conn = redis.StrictRedis(unix_socket_path=REDIS_SOCKET,
password=REDIS_PASSWORD)
while True:
if not SETTINGS['master']:
while REDIS_CONN.get('crawl:master:state') != "running":
gevent.sleep(SETTINGS['socket_timeout'])
node = redis_conn.spop('pending') # Pop random node from set
if node is None:
gevent.sleep(1)
continue
node = eval(node) # Convert string from Redis to tuple
# Skip IPv6 node
if ":" in node[0] and not SETTINGS['ipv6']:
continue
key = "node:{}-{}-{}".format(node[0], node[1], node[2])
if redis_conn.exists(key):
continue
connect(redis_conn, key)
def set_pending():
"""
Initializes pending set in Redis with a list of reachable nodes from DNS
seeders to bootstrap the crawler.
"""
for seeder in SETTINGS['seeders']:
nodes = []
try:
nodes = socket.getaddrinfo(seeder, None)
except socket.gaierror as err:
logging.warning("%s", err)
continue
for node in nodes:
address = node[-1][0]
if is_excluded(address):
logging.debug("Exclude: %s", address)
continue
logging.debug("%s: %s", seeder, address)
REDIS_CONN.sadd('pending', (address, DEFAULT_PORT, SERVICES))
def is_excluded(address):
"""
Returns True if address is found in exclusion list, False if otherwise.
"""
address_family = socket.AF_INET
key = 'exclude_ipv4_networks'
if ":" in address:
address_family = socket.AF_INET6
key = 'exclude_ipv6_networks'
try:
addr = int(hexlify(socket.inet_pton(address_family, address)), 16)
except socket.error:
logging.warning("Bad address: %s", address)
return True
return any([(addr & net[1] == net[0]) for net in SETTINGS[key]])
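# For example (sketch): if '10.0.0.0/8' is listed in exclude_ipv4_networks, the
# tuple stored in SETTINGS is (0x0A000000, 0xFF000000), so is_excluded('10.1.2.3')
# is True because 0x0A010203 & 0xFF000000 == 0x0A000000.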
def init_settings(argv):
"""
Populates SETTINGS with key-value pairs from configuration file.
"""
conf = ConfigParser()
conf.read(argv[1])
SETTINGS['logfile'] = conf.get('crawl', 'logfile')
SETTINGS['seeders'] = conf.get('crawl', 'seeders').strip().split("\n")
SETTINGS['workers'] = conf.getint('crawl', 'workers')
SETTINGS['debug'] = conf.getboolean('crawl', 'debug')
SETTINGS['source_address'] = conf.get('crawl', 'source_address')
SETTINGS['protocol_version'] = conf.getint('crawl', 'protocol_version')
SETTINGS['user_agent'] = conf.get('crawl', 'user_agent')
SETTINGS['services'] = conf.getint('crawl', 'services')
SETTINGS['relay'] = conf.getint('crawl', 'relay')
SETTINGS['socket_timeout'] = conf.getint('crawl', 'socket_timeout')
SETTINGS['cron_delay'] = conf.getint('crawl', 'cron_delay')
SETTINGS['max_age'] = conf.getint('crawl', 'max_age')
SETTINGS['ipv6'] = conf.getboolean('crawl', 'ipv6')
exclude_ipv4_networks = conf.get(
'crawl', 'exclude_ipv4_networks').strip().split("\n")
exclude_ipv6_networks = conf.get(
'crawl', 'exclude_ipv6_networks').strip().split("\n")
# List of tuples of network address and netmask
SETTINGS['exclude_ipv4_networks'] = []
SETTINGS['exclude_ipv6_networks'] = []
for network in exclude_ipv4_networks:
try:
network = ip_network(unicode(network))
except ValueError:
continue
SETTINGS['exclude_ipv4_networks'].append(
(int(network.network_address), int(network.netmask)))
for network in exclude_ipv6_networks:
try:
network = ip_network(unicode(network))
except ValueError:
continue
SETTINGS['exclude_ipv6_networks'].append(
(int(network.network_address), int(network.netmask)))
SETTINGS['crawl_dir'] = conf.get('crawl', 'crawl_dir')
if not os.path.exists(SETTINGS['crawl_dir']):
os.makedirs(SETTINGS['crawl_dir'])
# Set to True for master process
SETTINGS['master'] = argv[2] == "master"
def main(argv):
if len(argv) < 3 or not os.path.exists(argv[1]):
print("Usage: crawl.py [config] [master|slave]")
return 1
# Initialize global settings
init_settings(argv)
# Initialize logger
loglevel = logging.INFO
if SETTINGS['debug']:
loglevel = logging.DEBUG
logformat = ("[%(process)d] %(asctime)s,%(msecs)05.1f %(levelname)s "
"(%(funcName)s) %(message)s")
logging.basicConfig(level=loglevel,
format=logformat,
filename=SETTINGS['logfile'],
filemode='a')
print("Writing output to {}, press CTRL+C to terminate..".format(
SETTINGS['logfile']))
if SETTINGS['master']:
REDIS_CONN.set('crawl:master:state', "starting")
logging.info("Removing all keys")
keys = REDIS_CONN.keys('node:*')
redis_pipe = REDIS_CONN.pipeline()
for key in keys:
redis_pipe.delete(key)
redis_pipe.delete('pending')
redis_pipe.execute()
set_pending()
# Spawn workers (greenlets) including one worker reserved for cron tasks
workers = []
if SETTINGS['master']:
workers.append(gevent.spawn(cron))
for _ in xrange(SETTINGS['workers'] - len(workers)):
workers.append(gevent.spawn(task))
logging.info("Workers: %d", len(workers))
gevent.joinall(workers)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
topxiaoke/myedx | lms/djangoapps/dashboard/views.py | 23 | 3191 | from django.http import Http404
from edxmako.shortcuts import render_to_response
from django.db import connection
from student.models import CourseEnrollment
from django.contrib.auth.models import User
def dictfetchall(cursor):
    '''Return all rows from a cursor as a table: a list whose first element is
    the list of column names, followed by one list per result row.
    Adapted from the dictfetchall example in the Django documentation.'''
desc = cursor.description
table = []
table.append([col[0] for col in desc])
# ensure response from db is a list, not a tuple (which is returned
# by MySQL backed django instances)
rows_from_cursor=cursor.fetchall()
table = table + [list(row) for row in rows_from_cursor]
return table
def SQL_query_to_list(cursor, query_string):
cursor.execute(query_string)
raw_result=dictfetchall(cursor)
return raw_result
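# For example (illustrative): SQL_query_to_list(cursor, "select username from auth_user")
# would return something like [['username'], ['staff'], ['verified'], ...] --
# a header row followed by one single-column row per result.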
def dashboard(request):
"""
Slightly less hackish hack to show staff enrollment numbers and other
simple queries.
All queries here should be indexed and simple. Mostly, this means don't
touch courseware_studentmodule, as tempting as it may be.
"""
if not request.user.is_staff:
raise Http404
# results are passed to the template. The template knows how to render
# two types of results: scalars and tables. Scalars should be represented
# as "Visible Title": Value and tables should be lists of lists where each
# inner list represents a single row of the table
results = {"scalars":{},"tables":{}}
# count how many users we have
results["scalars"]["Unique Usernames"]=User.objects.filter().count()
results["scalars"]["Activated Usernames"]=User.objects.filter(is_active=1).count()
# count how many enrollments we have
results["scalars"]["Total Enrollments Across All Courses"] = CourseEnrollment.objects.filter(is_active=1).count()
# establish a direct connection to the database (for executing raw SQL)
cursor = connection.cursor()
# define the queries that will generate our user-facing tables
# table queries need not take the form of raw SQL, but do in this case since
# the MySQL backend for django isn't very friendly with group by or distinct
table_queries = {}
table_queries["course registrations (current enrollments)"] = """
select
course_id as Course,
count(user_id) as Students
from student_courseenrollment
where is_active=1
group by course_id
order by students desc;"""
table_queries["number of students in each number of classes"] = """
select registrations as 'Registered for __ Classes' ,
count(registrations) as Users
from (select count(user_id) as registrations
from student_courseenrollment
where is_active=1
group by user_id) as registrations_per_user
group by registrations;"""
# add the result for each of the table_queries to the results object
    for query in table_queries.keys():
        # SQL_query_to_list executes the query itself, so no separate execute() call is needed here.
        results["tables"][query] = SQL_query_to_list(cursor, table_queries[query])
context={"results":results}
return render_to_response("admin_dashboard.html",context)
| agpl-3.0 |
J861449197/edx-platform | common/test/acceptance/pages/lms/teams.py | 5 | 16012 | # -*- coding: utf-8 -*-
"""
Teams pages.
"""
from .course_page import CoursePage
from .discussion import InlineDiscussionPage
from ..common.paging import PaginatedUIMixin
from ...pages.studio.utils import confirm_prompt
from .fields import FieldsMixin
TOPIC_CARD_CSS = 'div.wrapper-card-core'
CARD_TITLE_CSS = 'h3.card-title'
MY_TEAMS_BUTTON_CSS = 'a.nav-item[data-index="0"]'
BROWSE_BUTTON_CSS = 'a.nav-item[data-index="1"]'
TEAMS_LINK_CSS = '.action-view'
TEAMS_HEADER_CSS = '.teams-header'
CREATE_TEAM_LINK_CSS = '.create-team'
class TeamCardsMixin(object):
"""Provides common operations on the team card component."""
@property
def team_cards(self):
"""Get all the team cards on the page."""
return self.q(css='.team-card')
@property
def team_names(self):
"""Return the names of each team on the page."""
return self.q(css='h3.card-title').map(lambda e: e.text).results
@property
def team_descriptions(self):
"""Return the names of each team on the page."""
return self.q(css='p.card-description').map(lambda e: e.text).results
class TeamsPage(CoursePage):
"""
Teams page/tab.
"""
url_path = "teams"
def is_browser_on_page(self):
""" Checks if teams page is being viewed """
return self.q(css='body.view-teams').present
def get_body_text(self):
""" Returns the current dummy text. This will be changed once there is more content on the page. """
main_page_content_css = '.page-content-main'
self.wait_for(
lambda: len(self.q(css=main_page_content_css).text) == 1,
description="Body text is present"
)
return self.q(css=main_page_content_css).text[0]
def active_tab(self):
""" Get the active tab. """
return self.q(css='.is-active').attrs('data-url')[0]
def browse_topics(self):
""" View the Browse tab of the Teams page. """
self.q(css=BROWSE_BUTTON_CSS).click()
def verify_team_count_in_first_topic(self, expected_count):
"""
Verify that the team count on the first topic card in the topic list is correct
(browse topics page).
"""
self.wait_for(
            lambda: self.q(css='.team-count')[0].text == ("0 Teams" if expected_count == 0 else "1 Team"),
description="Team count text on topic is wrong"
)
def verify_topic_team_count(self, expected_count):
""" Verify the number of teams listed on the topic page (browse teams within topic). """
self.wait_for(
lambda: len(self.q(css='.team-card')) == expected_count,
description="Expected number of teams is wrong"
)
def verify_my_team_count(self, expected_count):
""" Verify the number of teams on 'My Team'. """
# Click to "My Team" and verify that it contains the expected number of teams.
self.q(css=MY_TEAMS_BUTTON_CSS).click()
self.wait_for(
lambda: len(self.q(css='.team-card')) == expected_count,
description="Expected number of teams is wrong"
)
def click_all_topics(self):
""" Click on the "All Topics" breadcrumb """
self.q(css='a.nav-item').filter(text='All Topics')[0].click()
def click_specific_topic(self, topic):
""" Click on the breadcrumb for a specific topic """
self.q(css='a.nav-item').filter(text=topic)[0].click()
class MyTeamsPage(CoursePage, PaginatedUIMixin, TeamCardsMixin):
"""
The 'My Teams' tab of the Teams page.
"""
url_path = "teams/#my-teams"
def is_browser_on_page(self):
"""Check if the "My Teams" tab is being viewed."""
button_classes = self.q(css=MY_TEAMS_BUTTON_CSS).attrs('class')
if len(button_classes) == 0:
return False
return 'is-active' in button_classes[0]
class BrowseTopicsPage(CoursePage, PaginatedUIMixin):
"""
The 'Browse' tab of the Teams page.
"""
url_path = "teams/#browse"
def is_browser_on_page(self):
"""Check if the Browse tab is being viewed."""
button_classes = self.q(css=BROWSE_BUTTON_CSS).attrs('class')
if len(button_classes) == 0:
return False
return 'is-active' in button_classes[0]
@property
def topic_cards(self):
"""Return a list of the topic cards present on the page."""
return self.q(css=TOPIC_CARD_CSS).results
@property
def topic_names(self):
"""Return a list of the topic names present on the page."""
return self.q(css=CARD_TITLE_CSS).map(lambda e: e.text).results
def browse_teams_for_topic(self, topic_name):
"""
Show the teams list for `topic_name`.
"""
self.q(css=TEAMS_LINK_CSS).filter(
text='View Teams in the {topic_name} Topic'.format(topic_name=topic_name)
)[0].click()
self.wait_for_ajax()
def sort_topics_by(self, sort_order):
"""Sort the list of topics by the given `sort_order`."""
self.q(
css='#paging-header-select option[value={sort_order}]'.format(sort_order=sort_order)
).click()
self.wait_for_ajax()
class BrowseTeamsPage(CoursePage, PaginatedUIMixin, TeamCardsMixin):
"""
The paginated UI for browsing teams within a Topic on the Teams
page.
"""
def __init__(self, browser, course_id, topic):
"""
Set up `self.url_path` on instantiation, since it dynamically
reflects the current topic. Note that `topic` is a dict
representation of a topic following the same convention as a
course module's topic.
"""
super(BrowseTeamsPage, self).__init__(browser, course_id)
self.topic = topic
self.url_path = "teams/#topics/{topic_id}".format(topic_id=self.topic['id'])
def is_browser_on_page(self):
"""Check if we're on the teams list page for a particular topic."""
self.wait_for_element_presence('.team-actions', 'Wait for the bottom links to be present')
has_correct_url = self.url.endswith(self.url_path)
teams_list_view_present = self.q(css='.teams-main').present
return has_correct_url and teams_list_view_present
@property
def header_topic_name(self):
"""Get the topic name displayed by the page header"""
return self.q(css=TEAMS_HEADER_CSS + ' .page-title')[0].text
@property
def header_topic_description(self):
"""Get the topic description displayed by the page header"""
return self.q(css=TEAMS_HEADER_CSS + ' .page-description')[0].text
@property
def sort_order(self):
"""Return the current sort order on the page."""
return self.q(
css='#paging-header-select option'
).filter(
lambda e: e.is_selected()
).results[0].text.strip()
def click_create_team_link(self):
""" Click on create team link."""
query = self.q(css=CREATE_TEAM_LINK_CSS)
if query.present:
query.first.click()
self.wait_for_ajax()
def click_search_team_link(self):
""" Click on create team link."""
query = self.q(css='.search-team-descriptions')
if query.present:
query.first.click()
self.wait_for_ajax()
def click_browse_all_teams_link(self):
""" Click on browse team link."""
query = self.q(css='.browse-teams')
if query.present:
query.first.click()
self.wait_for_ajax()
def sort_teams_by(self, sort_order):
"""Sort the list of teams by the given `sort_order`."""
self.q(
css='#paging-header-select option[value={sort_order}]'.format(sort_order=sort_order)
).click()
self.wait_for_ajax()
class CreateOrEditTeamPage(CoursePage, FieldsMixin):
"""
Create team page.
"""
def __init__(self, browser, course_id, topic):
"""
Set up `self.url_path` on instantiation, since it dynamically
reflects the current topic. Note that `topic` is a dict
representation of a topic following the same convention as a
course module's topic.
"""
super(CreateOrEditTeamPage, self).__init__(browser, course_id)
self.topic = topic
self.url_path = "teams/#topics/{topic_id}/create-team".format(topic_id=self.topic['id'])
def is_browser_on_page(self):
"""Check if we're on the create team page for a particular topic."""
has_correct_url = self.url.endswith(self.url_path)
teams_create_view_present = self.q(css='.team-edit-fields').present
return has_correct_url and teams_create_view_present
@property
def header_page_name(self):
"""Get the page name displayed by the page header"""
return self.q(css='.page-header .page-title')[0].text
@property
def header_page_description(self):
"""Get the page description displayed by the page header"""
return self.q(css='.page-header .page-description')[0].text
@property
def header_page_breadcrumbs(self):
"""Get the page breadcrumb text displayed by the page header"""
return self.q(css='.page-header .breadcrumbs')[0].text
@property
def validation_message_text(self):
"""Get the error message text"""
return self.q(css='.create-team.wrapper-msg .copy')[0].text
def submit_form(self):
"""Click on create team button"""
self.q(css='.create-team .action-primary').first.click()
self.wait_for_ajax()
def cancel_team(self):
"""Click on cancel team button"""
self.q(css='.create-team .action-cancel').first.click()
self.wait_for_ajax()
class TeamPage(CoursePage, PaginatedUIMixin):
"""
The page for a specific Team within the Teams tab
"""
def __init__(self, browser, course_id, team=None):
"""
Set up `self.url_path` on instantiation, since it dynamically
reflects the current team.
"""
super(TeamPage, self).__init__(browser, course_id)
self.team = team
if self.team:
self.url_path = "teams/#teams/{topic_id}/{team_id}".format(
topic_id=self.team['topic_id'], team_id=self.team['id']
)
def is_browser_on_page(self):
"""Check if we're on the teams list page for a particular team."""
self.wait_for_ajax()
if self.team:
if not self.url.endswith(self.url_path):
return False
return self.q(css='.team-profile').present
@property
def discussion_id(self):
"""Get the id of the discussion module on the page"""
return self.q(css='div.discussion-module').attrs('data-discussion-id')[0]
@property
def discussion_page(self):
"""Get the discussion as a bok_choy page object"""
if not hasattr(self, '_discussion_page'):
# pylint: disable=attribute-defined-outside-init
self._discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
return self._discussion_page
@property
def team_name(self):
"""Get the team's name as displayed in the page header"""
return self.q(css='.page-header .page-title')[0].text
@property
def team_description(self):
"""Get the team's description as displayed in the page header"""
return self.q(css=TEAMS_HEADER_CSS + ' .page-description')[0].text
@property
def team_members_present(self):
"""Verifies that team members are present"""
return self.q(css='.page-content-secondary .team-members .team-member').present
@property
def team_capacity_text(self):
"""Returns team capacity text"""
return self.q(css='.page-content-secondary .team-capacity :last-child').text[0]
@property
def team_location(self):
""" Returns team location/country. """
return self.q(css='.page-content-secondary .team-country :last-child').text[0]
@property
def team_language(self):
""" Returns team location/country. """
return self.q(css='.page-content-secondary .team-language :last-child').text[0]
@property
def team_user_membership_text(self):
"""Returns the team membership text"""
query = self.q(css='.page-content-secondary > .team-user-membership-status')
return query.text[0] if query.present else ''
@property
def team_leave_link_present(self):
"""Verifies that team leave link is present"""
return self.q(css='.leave-team-link').present
def click_leave_team_link(self, remaining_members=0, cancel=False):
""" Click on Leave Team link"""
self.q(css='.leave-team-link').first.click()
confirm_prompt(self, cancel, require_notification=False)
if cancel is False:
self.wait_for(
lambda: self.join_team_button_present,
description="Join Team button did not become present"
)
self.wait_for_capacity_text(remaining_members)
@property
def team_members(self):
"""Returns the number of team members in this team"""
return len(self.q(css='.page-content-secondary .team-member'))
def click_first_profile_image(self):
"""Clicks on first team member's profile image"""
self.q(css='.page-content-secondary .members-info > .team-member').first.click()
@property
def first_member_username(self):
"""Returns the username of team member"""
return self.q(css='.page-content-secondary .tooltip-custom').text[0]
def click_join_team_button(self, total_members=1):
""" Click on Join Team button"""
self.q(css='.join-team .action-primary').first.click()
self.wait_for(
lambda: not self.join_team_button_present,
description="Join Team button did not go away"
)
self.wait_for_capacity_text(total_members)
def wait_for_capacity_text(self, num_members, max_size=10):
""" Wait for the team capacity text to be correct. """
self.wait_for(
lambda: self.team_capacity_text == self.format_capacity_text(num_members, max_size),
description="Team capacity text is not correct"
)
def format_capacity_text(self, num_members, max_size):
""" Helper method to format the expected team capacity text. """
return '{num_members} / {max_size} {members_text}'.format(
num_members=num_members,
max_size=max_size,
members_text='Member' if num_members == max_size else 'Members'
)
@property
def join_team_message(self):
""" Returns join team message """
self.wait_for_ajax()
return self.q(css='.join-team .join-team-message').text[0]
@property
def join_team_button_present(self):
""" Returns True if Join Team button is present else False """
return self.q(css='.join-team .action-primary').present
@property
def join_team_message_present(self):
""" Returns True if Join Team message is present else False """
return self.q(css='.join-team .join-team-message').present
@property
def new_post_button_present(self):
""" Returns True if New Post button is present else False """
return self.q(css='.discussion-module .new-post-btn').present
def click_all_topics_breadcrumb(self):
"""Navigate to the 'All Topics' page."""
self.q(css='.breadcrumbs a').results[0].click()
self.wait_for_ajax()
@property
def edit_team_button_present(self):
""" Returns True if Edit Team button is present else False """
return self.q(css='.form-actions .action-edit-team').present
def click_edit_team_button(self):
""" Click on Edit Team button"""
self.q(css='.form-actions .action-edit-team').first.click()
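# Illustrative usage sketch (an editorial addition, not part of the upstream page objects).
# It assumes a bok_choy browser fixture and a valid course_id; the team ids below are
# hypothetical and only follow the dict convention documented in TeamPage.__init__.
def _example_join_team_flow(browser, course_id):
    """Visit a hypothetical team page and join the team if the join button is shown."""
    team = {'id': 'team-1', 'topic_id': 'topic-1'}  # assumed ids, for illustration only
    team_page = TeamPage(browser, course_id, team=team)
    team_page.visit()
    if team_page.join_team_button_present:
        team_page.click_join_team_button(total_members=1)
    return team_page.team_name, team_page.team_capacity_text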
| agpl-3.0 |
onceuponatimeforever/oh-mainline | vendor/packages/requests/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCKRSMModel)
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-KR"
| agpl-3.0 |
mick-d/nipype | nipype/algorithms/tests/test_auto_TCompCor.py | 1 | 1551 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..confounds import TCompCor
def test_TCompCor_inputs():
input_map = dict(components_file=dict(usedefault=True,
),
header_prefix=dict(),
high_pass_cutoff=dict(usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
ignore_initial_volumes=dict(usedefault=True,
),
mask_files=dict(),
mask_index=dict(requires=['mask_files'],
xor=['merge_method'],
),
merge_method=dict(requires=['mask_files'],
xor=['mask_index'],
),
num_components=dict(usedefault=True,
),
percentile_threshold=dict(usedefault=True,
),
pre_filter=dict(usedefault=True,
),
realigned_file=dict(mandatory=True,
),
regress_poly_degree=dict(usedefault=True,
),
repetition_time=dict(),
save_pre_filter=dict(),
use_regress_poly=dict(deprecated='0.15.0',
new_name='pre_filter',
),
)
inputs = TCompCor.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_TCompCor_outputs():
output_map = dict(components_file=dict(),
high_variance_masks=dict(),
pre_filter_file=dict(),
)
outputs = TCompCor.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| bsd-3-clause |
denisff/python-for-android | python3-alpha/python3-src/Lib/lib2to3/pgen2/grammar.py | 54 | 5379 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
# Python imports
import pickle
# Local imports
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def copy(self):
"""
Copy the grammar.
"""
new = self.__class__()
for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
"tokens", "symbol2label"):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
return new
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print("s2n")
pprint(self.symbol2number)
print("n2s")
pprint(self.number2symbol)
print("states")
pprint(self.states)
print("dfas")
pprint(self.dfas)
print("labels")
pprint(self.labels)
print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
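# Illustrative usage sketch (an editorial addition, not part of the upstream module).
# It only exercises the documented dump()/load() round trip; the filename and the
# symbol entry are hypothetical.
def _example_roundtrip(path="grammar_tables.pickle"):
    g = Grammar()
    g.symbol2number["file_input"] = 256
    g.number2symbol[256] = "file_input"
    g.dump(path)      # serialize the tables with pickle
    g2 = Grammar()
    g2.load(path)     # read them back into a fresh instance
    return g2.symbol2number == g.symbol2number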
| apache-2.0 |
bbedward/ZenKernel_Angler | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, liblk],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
chrishas35/django-travis-ci | django/views/decorators/http.py | 97 | 7121 | """
Decorators for views based on HTTP headers.
"""
from calendar import timegm
from functools import wraps
from django.utils.decorators import decorator_from_middleware, available_attrs
from django.utils.http import http_date, parse_http_date_safe, parse_etags, quote_etag
from django.utils.log import getLogger
from django.middleware.http import ConditionalGetMiddleware
from django.http import HttpResponseNotAllowed, HttpResponseNotModified, HttpResponse
conditional_page = decorator_from_middleware(ConditionalGetMiddleware)
logger = getLogger('django.request')
def require_http_methods(request_method_list):
"""
Decorator to make a view only accept particular request methods. Usage::
@require_http_methods(["GET", "POST"])
def my_view(request):
# I can assume now that only GET or POST requests make it this far
# ...
Note that request methods should be in uppercase.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
if request.method not in request_method_list:
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': request
}
)
return HttpResponseNotAllowed(request_method_list)
return func(request, *args, **kwargs)
return inner
return decorator
require_GET = require_http_methods(["GET"])
require_GET.__doc__ = "Decorator to require that a view only accept the GET method."
require_POST = require_http_methods(["POST"])
require_POST.__doc__ = "Decorator to require that a view only accept the POST method."
require_safe = require_http_methods(["GET", "HEAD"])
require_safe.__doc__ = "Decorator to require that a view only accept safe methods: GET and HEAD."
def condition(etag_func=None, last_modified_func=None):
"""
Decorator to support conditional retrieval (or change) for a view
function.
The parameters are callables to compute the ETag and last modified time for
the requested resource, respectively. The callables are passed the same
parameters as the view itself. The Etag function should return a string (or
None if the resource doesn't exist), whilst the last_modified function
should return a datetime object (or None if the resource doesn't exist).
If both parameters are provided, all the preconditions must be met before
the view is processed.
This decorator will either pass control to the wrapped view function or
return an HTTP 304 response (unmodified) or 412 response (preconditions
failed), depending upon the request method.
Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match
plus If-modified-since headers) will result in the view function being
called.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner(request, *args, **kwargs):
# Get HTTP request headers
if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
if if_modified_since:
if_modified_since = parse_http_date_safe(if_modified_since)
if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
if_match = request.META.get("HTTP_IF_MATCH")
if if_none_match or if_match:
# There can be more than one ETag in the request, so we
# consider the list of values.
try:
etags = parse_etags(if_none_match or if_match)
except ValueError:
# In case of invalid etag ignore all ETag headers.
# Apparently Opera sends invalidly quoted headers at times
# (we should be returning a 400 response, but that's a
# little extreme) -- this is Django bug #10681.
if_none_match = None
if_match = None
# Compute values (if any) for the requested resource.
if etag_func:
res_etag = etag_func(request, *args, **kwargs)
else:
res_etag = None
if last_modified_func:
dt = last_modified_func(request, *args, **kwargs)
if dt:
res_last_modified = timegm(dt.utctimetuple())
else:
res_last_modified = None
else:
res_last_modified = None
response = None
if not ((if_match and (if_modified_since or if_none_match)) or
(if_match and if_none_match)):
# We only get here if no undefined combinations of headers are
# specified.
if ((if_none_match and (res_etag in etags or
"*" in etags and res_etag)) and
(not if_modified_since or
(res_last_modified and if_modified_since and
res_last_modified <= if_modified_since))):
if request.method in ("GET", "HEAD"):
response = HttpResponseNotModified()
else:
logger.warning('Precondition Failed: %s', request.path,
extra={
'status_code': 412,
'request': request
}
)
response = HttpResponse(status=412)
elif if_match and ((not res_etag and "*" in etags) or
(res_etag and res_etag not in etags)):
logger.warning('Precondition Failed: %s', request.path,
extra={
'status_code': 412,
'request': request
}
)
response = HttpResponse(status=412)
elif (not if_none_match and request.method == "GET" and
res_last_modified and if_modified_since and
res_last_modified <= if_modified_since):
response = HttpResponseNotModified()
if response is None:
response = func(request, *args, **kwargs)
# Set relevant headers on the response if they don't already exist.
if res_last_modified and not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date(res_last_modified)
if res_etag and not response.has_header('ETag'):
response['ETag'] = quote_etag(res_etag)
return response
return inner
return decorator
# Shortcut decorators for common cases based on ETag or Last-Modified only
def etag(etag_func):
return condition(etag_func=etag_func)
def last_modified(last_modified_func):
return condition(last_modified_func=last_modified_func)
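# Illustrative usage sketch (an editorial addition, not part of the upstream module).
# The latest_change callable below is hypothetical: condition() only requires that it
# accepts the same arguments as the view and returns a datetime (or None), as
# documented above.
def _example_conditional_view():
    from datetime import datetime
    def latest_change(request, *args, **kwargs):
        return datetime(2012, 1, 1)   # pretend "last modified" timestamp
    @condition(last_modified_func=latest_change)
    def my_view(request):
        return HttpResponse("hello")
    return my_view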
| bsd-3-clause |
brython-dev/brython | www/src/Lib/test/test_code.py | 2 | 12718 | """This module includes tests of the code object representation.
>>> def f(x):
... def g(y):
... return x + y
... return g
...
>>> dump(f.__code__)
name: f
argcount: 1
posonlyargcount: 0
kwonlyargcount: 0
names: ()
varnames: ('x', 'g')
cellvars: ('x',)
freevars: ()
nlocals: 2
flags: 3
consts: ('None', '<code object g>', "'f.<locals>.g'")
>>> dump(f(4).__code__)
name: g
argcount: 1
posonlyargcount: 0
kwonlyargcount: 0
names: ()
varnames: ('y',)
cellvars: ()
freevars: ('x',)
nlocals: 1
flags: 19
consts: ('None',)
>>> def h(x, y):
... a = x + y
... b = x - y
... c = a * b
... return c
...
>>> dump(h.__code__)
name: h
argcount: 2
posonlyargcount: 0
kwonlyargcount: 0
names: ()
varnames: ('x', 'y', 'a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 5
flags: 67
consts: ('None',)
>>> def attrs(obj):
... print(obj.attr1)
... print(obj.attr2)
... print(obj.attr3)
>>> dump(attrs.__code__)
name: attrs
argcount: 1
posonlyargcount: 0
kwonlyargcount: 0
names: ('print', 'attr1', 'attr2', 'attr3')
varnames: ('obj',)
cellvars: ()
freevars: ()
nlocals: 1
flags: 67
consts: ('None',)
>>> def optimize_away():
... 'doc string'
... 'not a docstring'
... 53
... 0x53
>>> dump(optimize_away.__code__)
name: optimize_away
argcount: 0
posonlyargcount: 0
kwonlyargcount: 0
names: ()
varnames: ()
cellvars: ()
freevars: ()
nlocals: 0
flags: 67
consts: ("'doc string'", 'None')
>>> def keywordonly_args(a,b,*,k1):
... return a,b,k1
...
>>> dump(keywordonly_args.__code__)
name: keywordonly_args
argcount: 2
posonlyargcount: 0
kwonlyargcount: 1
names: ()
varnames: ('a', 'b', 'k1')
cellvars: ()
freevars: ()
nlocals: 3
flags: 67
consts: ('None',)
>>> def posonly_args(a,b,/,c):
... return a,b,c
...
>>> dump(posonly_args.__code__)
name: posonly_args
argcount: 3
posonlyargcount: 2
kwonlyargcount: 0
names: ()
varnames: ('a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 3
flags: 67
consts: ('None',)
"""
import inspect
import sys
import threading
import unittest
import weakref
try:
import ctypes
except ImportError:
ctypes = None
from test.support import (run_doctest, run_unittest, cpython_only,
check_impl_detail)
def consts(t):
"""Yield a doctest-safe sequence of object reprs."""
for elt in t:
r = repr(elt)
if r.startswith("<code object"):
yield "<code object %s>" % elt.co_name
else:
yield r
def dump(co):
"""Print out a text representation of a code object."""
for attr in ["name", "argcount", "posonlyargcount",
"kwonlyargcount", "names", "varnames",
"cellvars", "freevars", "nlocals", "flags"]:
print("%s: %s" % (attr, getattr(co, "co_" + attr)))
print("consts:", tuple(consts(co.co_consts)))
# Needed for test_closure_injection below
# Defined at global scope to avoid implicitly closing over __class__
def external_getitem(self, i):
return f"Foreign getitem: {super().__getitem__(i)}"
class CodeTest(unittest.TestCase):
@cpython_only
def test_newempty(self):
import _testcapi
co = _testcapi.code_newempty("filename", "funcname", 15)
self.assertEqual(co.co_filename, "filename")
self.assertEqual(co.co_name, "funcname")
self.assertEqual(co.co_firstlineno, 15)
@cpython_only
def test_closure_injection(self):
# From https://bugs.python.org/issue32176
from types import FunctionType
def create_closure(__class__):
return (lambda: __class__).__closure__
def new_code(c):
'''A new code object with a __class__ cell added to freevars'''
return c.replace(co_freevars=c.co_freevars + ('__class__',))
def add_foreign_method(cls, name, f):
code = new_code(f.__code__)
assert not f.__closure__
closure = create_closure(cls)
defaults = f.__defaults__
setattr(cls, name, FunctionType(code, globals(), name, defaults, closure))
class List(list):
pass
add_foreign_method(List, "__getitem__", external_getitem)
# Ensure the closure injection actually worked
function = List.__getitem__
class_ref = function.__closure__[0].cell_contents
self.assertIs(class_ref, List)
# Ensure the code correctly indicates it accesses a free variable
self.assertFalse(function.__code__.co_flags & inspect.CO_NOFREE,
hex(function.__code__.co_flags))
# Ensure the zero-arg super() call in the injected method works
obj = List([1, 2, 3])
self.assertEqual(obj[0], "Foreign getitem: 1")
def test_constructor(self):
def func(): pass
co = func.__code__
CodeType = type(co)
# test code constructor
return CodeType(co.co_argcount,
co.co_posonlyargcount,
co.co_kwonlyargcount,
co.co_nlocals,
co.co_stacksize,
co.co_flags,
co.co_code,
co.co_consts,
co.co_names,
co.co_varnames,
co.co_filename,
co.co_name,
co.co_firstlineno,
co.co_lnotab,
co.co_freevars,
co.co_cellvars)
def test_replace(self):
def func():
x = 1
return x
code = func.__code__
# different co_name, co_varnames, co_consts
def func2():
y = 2
return y
code2 = func2.__code__
for attr, value in (
("co_argcount", 0),
("co_posonlyargcount", 0),
("co_kwonlyargcount", 0),
("co_nlocals", 0),
("co_stacksize", 0),
("co_flags", code.co_flags | inspect.CO_COROUTINE),
("co_firstlineno", 100),
("co_code", code2.co_code),
("co_consts", code2.co_consts),
("co_names", ("myname",)),
("co_varnames", code2.co_varnames),
("co_freevars", ("freevar",)),
("co_cellvars", ("cellvar",)),
("co_filename", "newfilename"),
("co_name", "newname"),
("co_lnotab", code2.co_lnotab),
):
with self.subTest(attr=attr, value=value):
new_code = code.replace(**{attr: value})
self.assertEqual(getattr(new_code, attr), value)
def isinterned(s):
return s is sys.intern(('_' + s + '_')[1:-1])
class CodeConstsTest(unittest.TestCase):
def find_const(self, consts, value):
for v in consts:
if v == value:
return v
self.assertIn(value, consts) # raises an exception
self.fail('Should never be reached')
def assertIsInterned(self, s):
if not isinterned(s):
self.fail('String %r is not interned' % (s,))
def assertIsNotInterned(self, s):
if isinterned(s):
self.fail('String %r is interned' % (s,))
@cpython_only
def test_interned_string(self):
co = compile('res = "str_value"', '?', 'exec')
v = self.find_const(co.co_consts, 'str_value')
self.assertIsInterned(v)
@cpython_only
def test_interned_string_in_tuple(self):
co = compile('res = ("str_value",)', '?', 'exec')
v = self.find_const(co.co_consts, ('str_value',))
self.assertIsInterned(v[0])
@cpython_only
def test_interned_string_in_frozenset(self):
co = compile('res = a in {"str_value"}', '?', 'exec')
v = self.find_const(co.co_consts, frozenset(('str_value',)))
self.assertIsInterned(tuple(v)[0])
@cpython_only
def test_interned_string_default(self):
def f(a='str_value'):
return a
self.assertIsInterned(f())
@cpython_only
def test_interned_string_with_null(self):
co = compile(r'res = "str\0value!"', '?', 'exec')
v = self.find_const(co.co_consts, 'str\0value!')
self.assertIsNotInterned(v)
class CodeWeakRefTest(unittest.TestCase):
def test_basic(self):
# Create a code object in a clean environment so that we know we have
# the only reference to it left.
namespace = {}
exec("def f(): pass", globals(), namespace)
f = namespace["f"]
del namespace
self.called = False
def callback(code):
self.called = True
# f is now the last reference to the function, and through it, the code
# object. While we hold it, check that we can create a weakref and
# deref it. Then delete it, and check that the callback gets called and
# the reference dies.
coderef = weakref.ref(f.__code__, callback)
self.assertTrue(bool(coderef()))
del f
self.assertFalse(bool(coderef()))
self.assertTrue(self.called)
if check_impl_detail(cpython=True) and ctypes is not None:
py = ctypes.pythonapi
freefunc = ctypes.CFUNCTYPE(None,ctypes.c_voidp)
RequestCodeExtraIndex = py._PyEval_RequestCodeExtraIndex
RequestCodeExtraIndex.argtypes = (freefunc,)
RequestCodeExtraIndex.restype = ctypes.c_ssize_t
SetExtra = py._PyCode_SetExtra
SetExtra.argtypes = (ctypes.py_object, ctypes.c_ssize_t, ctypes.c_voidp)
SetExtra.restype = ctypes.c_int
GetExtra = py._PyCode_GetExtra
GetExtra.argtypes = (ctypes.py_object, ctypes.c_ssize_t,
ctypes.POINTER(ctypes.c_voidp))
GetExtra.restype = ctypes.c_int
LAST_FREED = None
def myfree(ptr):
global LAST_FREED
LAST_FREED = ptr
FREE_FUNC = freefunc(myfree)
FREE_INDEX = RequestCodeExtraIndex(FREE_FUNC)
class CoExtra(unittest.TestCase):
def get_func(self):
# Defining a function causes the containing function to have a
# reference to the code object. We need the code objects to go
# away, so we eval a lambda.
return eval('lambda:42')
def test_get_non_code(self):
f = self.get_func()
self.assertRaises(SystemError, SetExtra, 42, FREE_INDEX,
ctypes.c_voidp(100))
self.assertRaises(SystemError, GetExtra, 42, FREE_INDEX,
ctypes.c_voidp(100))
def test_bad_index(self):
f = self.get_func()
self.assertRaises(SystemError, SetExtra, f.__code__,
FREE_INDEX+100, ctypes.c_voidp(100))
self.assertEqual(GetExtra(f.__code__, FREE_INDEX+100,
ctypes.c_voidp(100)), 0)
def test_free_called(self):
# Verify that the provided free function gets invoked
# when the code object is cleaned up.
f = self.get_func()
SetExtra(f.__code__, FREE_INDEX, ctypes.c_voidp(100))
del f
self.assertEqual(LAST_FREED, 100)
def test_get_set(self):
# Test basic get/set round tripping.
f = self.get_func()
extra = ctypes.c_voidp()
SetExtra(f.__code__, FREE_INDEX, ctypes.c_voidp(200))
# reset should free...
SetExtra(f.__code__, FREE_INDEX, ctypes.c_voidp(300))
self.assertEqual(LAST_FREED, 200)
extra = ctypes.c_voidp()
GetExtra(f.__code__, FREE_INDEX, extra)
self.assertEqual(extra.value, 300)
del f
def test_free_different_thread(self):
# Freeing a code object on a different thread then
# where the co_extra was set should be safe.
f = self.get_func()
class ThreadTest(threading.Thread):
def __init__(self, f, test):
super().__init__()
self.f = f
self.test = test
def run(self):
del self.f
self.test.assertEqual(LAST_FREED, 500)
SetExtra(f.__code__, FREE_INDEX, ctypes.c_voidp(500))
tt = ThreadTest(f, self)
del f
tt.start()
tt.join()
self.assertEqual(LAST_FREED, 500)
def test_main(verbose=None):
from test import test_code
run_doctest(test_code, verbose)
tests = [CodeTest, CodeConstsTest, CodeWeakRefTest]
if check_impl_detail(cpython=True) and ctypes is not None:
tests.append(CoExtra)
run_unittest(*tests)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
Transkribus/TranskribusDU | TranskribusDU/gcn/gcn_models.py | 1 | 106471 | # -*- coding: utf-8 -*-
"""
@author: Stéphane Clinchant
"""
import math
import random
import warnings
import numpy as np
import tensorflow as tf
try:
tf.train.AdamOptimizer
except AttributeError:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import sklearn.metrics
#TODO Clean this
# Animesh commented this line out: from gcn.gcn_datasets import GCNDataset
# from gcn.gcn_datasets import GCNDataset
try:
from . import gcn_datasets
except ImportError:
import gcn_datasets
from common.trace import traceln
def init_glorot(shape, name=None):
"""Glorot & Bengio (AISTATS 2010) init."""
init_range = np.sqrt(6.0/(shape[0]+shape[1]))
initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)
return tf.Variable(initial, name=name)
def init_normal(shape,stddev,name=None):
initial=tf.random_normal(shape, mean=0.0, stddev=stddev, dtype=np.float32)
return tf.Variable(initial, name=name)
class MultiGraphNN(object):
'''
Abstract Class for a Neural Net learned on a graph list
'''
def train_lG(self,session,gcn_graph_train):
'''
Train on a list of graphs
:param session:
:param gcn_graph_train:
:return:
'''
for g in gcn_graph_train:
self.train(session, g, n_iter=1)
def test_lG(self, session, gcn_graph_test, verbose=True):
'''
Test on a list of graphs
:param session:
:param gcn_graph_test:
:return:
'''
acc_tp = np.float64(0.0)
nb_node_total = np.float64(0.0)
mean_acc_test = []
for g in gcn_graph_test:
acc = self.test(session, g, verbose=False)
mean_acc_test.append(acc)
nb_node_total += g.X.shape[0]
acc_tp += acc * g.X.shape[0]
g_acc = np.mean(mean_acc_test)
node_acc = acc_tp / nb_node_total
if verbose:
traceln('\t -- Mean Graph Accuracy', '%.4f' % g_acc)
traceln('\t -- Mean Node Accuracy', '%.4f' % node_acc)
return g_acc,node_acc
def predict_lG(self,session,gcn_graph_predict,verbose=True):
'''
Predict for a list of graphs
:param session:
:param gcn_graph_predict:
:return:
'''
lY_pred=[]
for g in gcn_graph_predict:
gY_pred = self.predict(session, g, verbose=verbose)
lY_pred.append(gY_pred)
return lY_pred
def predict_prob_lG(self, session, l_gcn_graph, verbose=True):
'''
Predict probabilities for a list of graphs
:param session:
:param l_gcn_graph:
:return: a list of predictions
'''
lY_pred = []
for g in l_gcn_graph:
gY_pred = self.prediction_prob(session, g, verbose=verbose)
lY_pred.append(gY_pred)
return lY_pred
def get_nb_params(self):
total_parameters = 0
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
shape = variable.get_shape()
#traceln(shape)
#traceln(len(shape))
variable_parameters = 1
for dim in shape:
#traceln(dim)
variable_parameters *= dim.value
#traceln(variable_parameters)
total_parameters += variable_parameters
return total_parameters
def train_with_validation_set(self,session,graph_train,graph_val,max_iter,eval_iter=10,patience=7,graph_test=None,save_model_path=None):
'''
Implements training with a validation set.
The model is trained and accuracy is measured on a validation set.
In addition, the model can be saved and one can perform early stopping thanks to the patience argument.
:param session:
:param graph_train: the list of graphs to train on
:param graph_val: the list of graphs used for validation
:param max_iter: maximum number of epochs
:param eval_iter: evaluate every eval_iter epochs
:param patience: stop training if accuracy has not improved on the validation set after patience evaluations
:param graph_test: Optional. If a test set is provided, then accuracy on the test set is reported
:param save_model_path: checkpoint filename used to save the model
:return: a dictionary with training accuracies, validation accuracies and test accuracies if any, and the Wedge parameters
'''
best_val_acc=0.0
wait=0
stop_training=False
stopped_iter=max_iter
train_accuracies=[]
validation_accuracies=[]
test_accuracies=[]
conf_mat=[]
start_monitoring_val_acc=False
for i in range(max_iter):
if stop_training:
break
if i % eval_iter == 0:
traceln('\n -- Epoch ', i,' Patience ', wait)
_, tr_acc = self.test_lG(session, graph_train, verbose=False)
traceln(' Train Acc ', '%.4f' % tr_acc)
train_accuracies.append(tr_acc)
_, node_acc = self.test_lG(session, graph_val, verbose=False)
traceln(' -- Valid Acc ', '%.4f' % node_acc)
validation_accuracies.append(node_acc)
if save_model_path:
save_path = self.saver.save(session, save_model_path, global_step=i)
if graph_test:
test_graph_acc,test_acc = self.test_lG(session, graph_test, verbose=False)
traceln(' -- Test Acc ', '%.4f' % test_acc,' %.4f' % test_graph_acc)
test_accuracies.append(test_acc)
if node_acc > best_val_acc:
best_val_acc = node_acc
wait = 0
else:
if wait >= patience:
stopped_iter = i
stop_training = True
wait += 1
else:
random.shuffle(graph_train)
for g in graph_train:
self.train(session, g, n_iter=1)
#Final Save
traceln(' -- Stopped Model Training after : ',stopped_iter)
traceln(' -- Validation Accuracies : ',['%.4f' % (100*sx) for sx in validation_accuracies])
#traceln('Final Training Accuracy')
_,node_train_acc = self.test_lG(session, graph_train)
traceln(' -- Final Training Accuracy','%.4f' % node_train_acc)
traceln(' -- Final Valid Acc')
self.test_lG(session, graph_val)
R = {}
R['train_acc'] = train_accuracies
R['val_acc'] = validation_accuracies
R['test_acc'] = test_accuracies
R['stopped_iter'] = stopped_iter
R['confusion_matrix'] = conf_mat
#R['W_edge'] =self.get_Wedge(session)
if graph_test:
_, final_test_acc = self.test_lG(session, graph_test)
traceln(' -- Final Test Acc','%.4f' % final_test_acc)
R['final_test_acc'] = final_test_acc
val = R['val_acc']
traceln(' -- Validation scores', val)
epoch_index = np.argmax(val)
traceln(' -- Best performance on val set: Epoch', epoch_index,val[epoch_index])
traceln(' -- Test Performance from val', test_accuracies[epoch_index])
return R
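# Illustrative usage sketch (an editorial addition, not part of the upstream module).
# The graph lists are assumed to be gcn_datasets.GCNDataset objects as consumed by the
# train()/test() methods of the concrete models below; the hyper-parameters are
# placeholders, not recommended values.
def _example_training_run(lg_train, lg_valid, node_dim, edge_dim, nb_classes, lg_test=None):
    model = EdgeConvNet(node_dim, edge_dim, nb_classes,
                        num_layers=3, learning_rate=0.001, nconv_edge=10)
    model.create_model()
    with tf.Session() as session:
        session.run(model.init)
        return model.train_with_validation_set(session, lg_train, lg_valid,
                                                max_iter=100, eval_iter=10, patience=5,
                                                graph_test=lg_test)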
class EnsembleGraphNN(MultiGraphNN):
'''
An ensemble of Graph NN models.
The individual models are constructed outside of this class and passed to the constructor.
'''
def __init__(self, graph_nn_models):
self.models = graph_nn_models
def train_lG(self, session, gcn_graph_train):
'''
Train on a list of graphs
:param session:
:param gcn_graph_train:
:return:
'''
for m in self.models:
m.train_lG(session, gcn_graph_train)
def test_lG(self, session, gcn_graph_test, verbose=True):
'''
Test on a list of graphs
:param session:
:param gcn_graph_test:
:return:
'''
acc_tp = np.float64(0.0)
nb_node_total = np.float64(0.0)
mean_acc_test = []
Y_pred=self.predict_lG(session,gcn_graph_test)
Y_true =[g.Y for g in gcn_graph_test]
Y_pred_node = np.vstack(Y_pred)
node_acc = sklearn.metrics.accuracy_score(np.argmax(np.vstack(Y_true),axis=1),np.argmax(Y_pred_node,axis=1))
g_acc =-1
#node_acc = acc_tp / nb_node_total
if verbose:
traceln(' -- Mean Graph Accuracy', '%.4f' % g_acc)
traceln(' -- Mean Node Accuracy', '%.4f' % node_acc)
return g_acc, node_acc
def predict_lG(self, session, gcn_graph_predict, verbose=True):
'''
Predict for a list of graphs
:param session:
:param gcn_graph_predict:
:return:
'''
lY_pred = []
#Seems very slow here
#TODO: predict for all graphs at once
nb_models = float(len(self.models))
for g in gcn_graph_predict:
#Average Proba Here
g_pred=[]
for m in self.models:
gY_pred = m.prediction_prob(session, g, verbose=verbose)
g_pred.append(gY_pred)
#traceln(gY_pred)
lY_pred.append(np.sum(g_pred,axis=0)/nb_models)
return lY_pred
def train_with_validation_set(self, session, graph_train, graph_val, max_iter, eval_iter=10, patience=7,
graph_test=None, save_model_path=None):
raise NotImplementedError
class Logit(MultiGraphNN):
'''
Logistic Regression for MultiGraph
'''
def __init__(self,node_dim,nb_classes,learning_rate=0.1,mu=0.1,node_indim=-1):
self.node_dim=node_dim
self.n_classes=nb_classes
self.learning_rate=learning_rate
self.activation=tf.nn.relu
self.mu=mu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.stack_instead_add=False
self.train_Wn0=True
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
def create_model(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
self.Wnode_layers=[]
self.Bnode_layers=[]
self.W_classif = tf.Variable(tf.random_uniform((self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
self.logits =tf.add(tf.matmul(self.node_input,self.W_classif),self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
# Global L2 Regulization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver= tf.train.Saver(max_to_keep=5)
traceln(' -- Number of Params: ', self.get_nb_params())
def save_model(self, session, model_filename):
traceln(" -- Saving Model")
save_path = self.saver.save(session, model_filename)
def restore_model(self, session, model_filename):
self.saver.restore(session, model_filename)
traceln(" -- Model restored.")
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, i.e. an SGD step, for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of SGD steps to perform for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln('Train',X.shape,EA.shape)
feed_batch = {
self.nb_node: graph.X.shape[0],
self.node_input: graph.X,
self.y_input: graph.Y,
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Evaluate the model on the given graph; prints the loss and returns the accuracy
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.node_input: graph.X,
self.y_input: graph.Y,
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Return the predicted classes for the graph
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.node_input: graph.X,
}
Ops = session.run([self.pred], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
return Ops[0]
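# Illustrative usage sketch for the Logit baseline (an editorial addition, not part of
# the upstream module).  The graph objects are assumed to expose X (node features) and
# Y (one-hot labels), which is all that Logit.train()/test() above rely on.
def _example_logit_baseline(lg_train, lg_test, node_dim, nb_classes):
    logit = Logit(node_dim, nb_classes, learning_rate=0.01, mu=0.001)
    logit.create_model()
    with tf.Session() as session:
        session.run(logit.init)
        for _ in range(50):          # a few passes over the training graphs
            logit.train_lG(session, lg_train)
        return logit.test_lG(session, lg_test)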
class EdgeConvNet(MultiGraphNN):
'''
Edge-GCN Model for a graph list
'''
#Variable ignored by the set_learning_options
_setter_variables={
"node_dim":True,"edge_dim":True,"nb_class":True,
"num_layers":True,"lr":True,"mu":True,
"node_indim":True,"nconv_edge":True,
"nb_iter":True,"ratio_train_val":True}
def __init__(self,node_dim,edge_dim,nb_classes,num_layers=1,learning_rate=0.1,mu=0.1,node_indim=-1,nconv_edge=1,
):
self.node_dim=node_dim
self.edge_dim=edge_dim
self.n_classes=nb_classes
self.num_layers=num_layers
self.learning_rate=learning_rate
self.activation=tf.nn.tanh
#self.activation=tf.nn.relu
self.mu=mu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.stack_instead_add=False
self.nconv_edge=nconv_edge
self.residual_connection=False#deprecated
self.shared_We = False#deprecated
self.optim_mode=0 #deprecated
self.init_fixed=False #ignore --for test purpose
self.logit_convolve=False#ignore --for test purpose
self.train_Wn0=True #ignore --for test purpose
self.dropout_rate_edge_feat= 0.0
self.dropout_rate_edge = 0.0
self.dropout_rate_node = 0.0
self.dropout_rate_H = 0.0
self.use_conv_weighted_avg=False
self.use_edge_mlp=False
self.edge_mlp_dim = 5
self.sum_attention=False
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
def set_learning_options(self,dict_model_config):
"""
Set all learning options that are not directly accessible from the constructor
:param dict_model_config:
:return:
"""
#traceln( -- dict_model_config)
for attrname,val in dict_model_config.items():
#We treat the activation function differently as we cannot pickle/serialize Python functions
if attrname=='activation_name':
if val=='relu':
self.activation=tf.nn.relu
elif val=='tanh':
self.activation=tf.nn.tanh
else:
raise Exception('Invalid Activation Function')
if attrname=='stack_instead_add' or attrname=='stack_convolutions':
self.stack_instead_add=val
if attrname not in self._setter_variables:
try:
traceln(' -- set ',attrname,val)
setattr(self,attrname,val)
except AttributeError:
warnings.warn("Ignored options for ECN"+attrname+':'+val)
def fastconvolve(self,Wedge,Bedge,F,S,T,H,nconv,Sshape,nb_edge,dropout_p_edge,dropout_p_edge_feat,
stack=True, use_dropout=False,zwe=None,use_weighted_average=False,
use_edge_mlp=False,Wedge_mlp=None,Bedge_mlp=None,use_attention=False):
'''
:param Wedge: Parameter matrix for the edge convolution, with shape (nconv_edge, edge_dim)
:param F: The Edge Feature Matrix
:param S: the Source (Node,Edge) matrix in sparse format
:param T: the Target (Node,Edge) matrix in sparse format
:param H: The current node layer
:param nconv: The number of edge convolutions.
:param Sshape: The shape of matrices S and T
:param nb_edge: The number of edges
:param stack: whether to concatenate all the convolutions or average them
:return: a tensor P of shape (nb_node, nconv*node_dim) if stack is True, else of shape (nb_node, node_dim)
'''
#F is n_edge times nconv
#TODO if stack is False we could simply sum the convolutions and do S diag(sum) T
#It would be faster
#Drop each convolution individually
if use_dropout:
#if False:
conv_dropout_ind = tf.nn.dropout(tf.ones([nconv], dtype=tf.float32), 1 - dropout_p_edge_feat)
ND_conv = tf.diag(conv_dropout_ind)
FW_ = tf.matmul(F, Wedge, transpose_b=True) + Bedge
FW = tf.matmul(FW_,ND_conv)
elif use_edge_mlp:
#Wedge mlp is a shared variable across layer which project edge in a lower dim
FW0 = tf.nn.tanh( tf.matmul(F,Wedge_mlp) +Bedge_mlp )
traceln(' -- FW0', FW0.get_shape())
FW = tf.matmul(FW0, Wedge, transpose_b=True) + Bedge
traceln(' -- FW', FW.get_shape())
else:
FW = tf.matmul(F, Wedge, transpose_b=True) + Bedge
traceln(' -- FW', FW.get_shape())
self.conv =tf.unstack(FW,axis=1)
Cops=[]
alphas=[]
Tr = tf.SparseTensor(indices=T, values=tf.ones([nb_edge], dtype=tf.float32), dense_shape=[Sshape[1],Sshape[0]])
Tr = tf.sparse_reorder(Tr)
TP = tf.sparse_tensor_dense_matmul(Tr,H)
if use_attention:
attn_params = va = init_glorot([2, int(self.node_dim)])
for i, cw in enumerate(self.conv):
#SD= tf.SparseTensor(indices=S,values=cw,dense_shape=[nb_node,nb_edge])
#Warning, pay attention to the ordering of edges
if use_weighted_average:
cw = zwe[i]*cw
if use_dropout:
cwd = tf.nn.dropout(cw, 1.0 -dropout_p_edge)
SD = tf.SparseTensor(indices=S, values=cwd, dense_shape=Sshape)
else:
SD = tf.SparseTensor(indices=S, values=cw, dense_shape=Sshape)
SD =tf.sparse_reorder(SD)
#Does this dropout depends on the convolution ?
#if use_dropout:
# SD = tf.nn.dropout(SD, 1.0 - dropout_p_edge)
Hi =tf.sparse_tensor_dense_matmul(SD,TP)
Cops.append(Hi)
if use_attention:
attn_val = tf.reduce_sum(tf.multiply(attn_params[0], H) + tf.multiply(attn_params[1], Hi), axis=1)
alphas.append(attn_val)
if stack is True:
#If stack we concatenate all the different convolutions
P=tf.concat(Cops,1)
elif use_attention:
alphas_s = tf.stack(alphas, axis=1)
alphas_l = tf.nn.softmax(tf.nn.leaky_relu(alphas_s))
#Not Clean to use the dropout for the edge feat
#Is this dropout necessary Here ? could do without
alphas_do =tf.nn.dropout(alphas_l,1 - dropout_p_edge_feat)
wC = [tf.multiply(tf.expand_dims(tf.transpose(alphas_do)[i], 1), C) for i, C in enumerate(Cops)]
P = tf.add_n(wC)
else:
#Else we take the mean
P=1.0/(tf.cast(nconv,tf.float32))*tf.add_n(Cops)
#traceln('p_add_n',P.get_shape())
return P
@staticmethod
def logitconvolve_fixed(pY,Yt,A_indegree):
'''
Tentative implementation of a fixed logit convolution that does not take edge features into account
'''
#warning we should test that Yt is column normalized
pY_Yt = tf.matmul(pY,Yt,transpose_b=True)
#TODO A is dense but should be sparse ....
P =tf.matmul(A_indegree,pY_Yt)
return P
def create_model_stack_convolutions(self):
#Create All the Variables
for i in range(self.num_layers - 1):
if i == 0:
Wnli = tf.Variable(
tf.random_uniform((self.node_dim * self.nconv_edge + self.node_dim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
else:
Wnli = tf.Variable(
tf.random_uniform((self.node_indim * self.nconv_edge + self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
traceln(' -- Wnli shape', Wnli.get_shape())
Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl' + str(i), dtype=tf.float32)
Weli = init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel_')
# Weli = tf.Variable(tf.random_normal([int(self.nconv_edge), int(self.edge_dim)], mean=0.0, stddev=1.0),
# dtype=np.float32, name='Wel_')
Beli = tf.Variable(0.01 * tf.ones([self.nconv_edge]), name='Bel' + str(i), dtype=tf.float32)
self.Wnode_layers.append(Wnli)
self.Bnode_layers.append(Bnli)
self.Wed_layers.append(Weli)
self.Bed_layers.append(Beli)
self.train_var.extend((self.Wnode_layers))
self.train_var.extend((self.Wed_layers))
self.Hnode_layers = []
self.W_classif = tf.Variable(
tf.random_uniform((self.node_indim * self.nconv_edge + self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif", dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif', dtype=np.float32)
self.train_var.append((self.W_classif))
self.train_var.append((self.B_classif))
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node], dtype=tf.float32), 1 - self.dropout_p_node)
self.ND = tf.diag(self.node_dropout_ind)
edge_dropout = self.dropout_rate_edge > 0.0 or self.dropout_rate_edge_feat > 0.0
traceln(' -- Edge Dropout', edge_dropout, self.dropout_rate_edge, self.dropout_rate_edge_feat)
if self.num_layers == 1:
self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
self.hidden_layers = [self.H]
traceln(" -- H shape", self.H.get_shape())
P = self.fastconvolve(self.Wel0, self.Bel0, self.F, self.Ssparse, self.Tsparse, self.H, self.nconv_edge,
self.Sshape, self.nb_edge,
self.dropout_p_edge, self.dropout_p_edge_feat, stack=self.stack_instead_add,
use_dropout=edge_dropout)
Hp = tf.concat([self.H, P], 1)
# Hp= P+self.H
Hi = self.activation(Hp)
# Hi_shape = Hi.get_shape()
# traceln(Hi_shape)
self.hidden_layers.append(Hi)
elif self.num_layers > 1:
if self.dropout_rate_node > 0.0:
#H0 = self.activation(tf.matmul(self.ND, tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0)))
H0 = tf.matmul(self.ND, tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
else:
#H0 = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
H0 = tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0)
self.Hnode_layers.append(H0)
# TODO Default to fast convolve but we change update configs, train and test flags
P = self.fastconvolve(self.Wel0, self.Bel0, self.F, self.Ssparse, self.Tsparse, H0, self.nconv_edge,
self.Sshape, self.nb_edge,
self.dropout_p_edge, self.dropout_p_edge_feat, stack=self.stack_instead_add,
use_dropout=edge_dropout,
)
Hp = tf.concat([H0, P], 1)
# TODO add activation Here.
self.hidden_layers = [self.activation(Hp)]
#self.hidden_layers = [Hp]
for i in range(self.num_layers - 1):
if self.dropout_rate_H > 0.0:
Hi_ = tf.nn.dropout(tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i],
1 - self.dropout_p_H)
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i]
if self.residual_connection:
Hi_ = tf.add(Hi_, self.Hnode_layers[-1])
self.Hnode_layers.append(Hi_)
# traceln('Hi_shape', Hi_.get_shape())
#traceln('Hi prevous shape', self.hidden_layers[-1].get_shape())
P = self.fastconvolve(self.Wed_layers[i], self.Bed_layers[i], self.F, self.Ssparse, self.Tsparse, Hi_,
self.nconv_edge, self.Sshape, self.nb_edge,
self.dropout_p_edge, self.dropout_p_edge_feat, stack=self.stack_instead_add,
use_dropout=edge_dropout,
)
Hp = tf.concat([Hi_, P], 1)
Hi = self.activation(Hp)
self.hidden_layers.append(Hi)
def create_model_sum_convolutions(self):
#self.Wed_layers.append(Wel0)
for i in range(self.num_layers-1):
if i==0:
# Animesh's code
Wnli = tf.Variable(tf.random_uniform((2 * self.node_dim, self.node_indim),
#Wnli = tf.Variable(tf.random_uniform((2 * self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl',
dtype=tf.float32)
else:
Wnli =tf.Variable(tf.random_uniform( (2*self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)),name='Wnl',dtype=tf.float32)
Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl'+str(i),dtype=tf.float32)
Weli= init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel_')
#Weli = tf.Variable(tf.random_normal([int(self.nconv_edge), int(self.edge_dim)], mean=0.0, stddev=1.0),
# dtype=np.float32, name='Wel_')
Beli = tf.Variable(0.01*tf.ones([self.nconv_edge]), name='Bel'+str(i),dtype=tf.float32)
self.Wnode_layers.append(Wnli)
self.Bnode_layers.append(Bnli)
self.Wed_layers.append (Weli)
self.Bed_layers.append(Beli)
self.train_var.extend((self.Wnode_layers))
self.train_var.extend((self.Wed_layers))
self.Hnode_layers=[]
self.W_classif = tf.Variable(tf.random_uniform((2*self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
self.train_var.append((self.W_classif))
self.train_var.append((self.B_classif))
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node], dtype=tf.float32), 1 - self.dropout_p_node)
self.ND = tf.diag(self.node_dropout_ind)
edge_dropout = self.dropout_rate_edge> 0.0 or self.dropout_rate_edge_feat > 0.0
traceln(' -- Edge Dropout',edge_dropout, self.dropout_rate_edge,self.dropout_rate_edge_feat)
if self.num_layers==1:
self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
self.hidden_layers = [self.H]
traceln(" -- H shape",self.H.get_shape())
P = self.fastconvolve(self.Wel0,self.Bel0,self.F,self.Ssparse,self.Tsparse,self.H,self.nconv_edge,self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat,stack=self.stack_instead_add,use_dropout=edge_dropout,
use_attention=self.sum_attention
)
Hp = tf.concat([self.H, P], 1)
Hi=self.activation(Hp)
self.hidden_layers.append(Hi)
elif self.num_layers>1:
if self.dropout_rate_node>0.0:
H0 = self.activation(tf.matmul(self.ND,tf.add(tf.matmul(self.node_input,self.Wnl0), self.Bnl0)))
else:
H0 = self.activation(tf.add(tf.matmul(self.node_input,self.Wnl0),self.Bnl0))
self.Hnode_layers.append(H0)
#TODO Default to fast convolve but we change update configs, train and test flags
P = self.fastconvolve(self.Wel0,self.Bel0, self.F, self.Ssparse, self.Tsparse, H0, self.nconv_edge, self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout,
use_attention=self.sum_attention
)
if self.use_conv_weighted_avg:
Hp = self.zH[0] * H0 + P
else:
Hp = tf.concat([H0, P], 1)
#TODO add activation Here.
#self.hidden_layers = [self.activation(Hp)]
self.hidden_layers = [Hp]
for i in range(self.num_layers-1):
if self.dropout_rate_H > 0.0:
Hi_ = tf.nn.dropout(tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i], 1-self.dropout_p_H)
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i]
if self.residual_connection:
Hi_= tf.add(Hi_,self.Hnode_layers[-1])
self.Hnode_layers.append(Hi_)
#traceln('Hi_shape',Hi_.get_shape())
# traceln('Hi prevous shape',self.hidden_layers[-1].get_shape())
P = self.fastconvolve(self.Wed_layers[i],self.Bed_layers[i], self.F, self.Ssparse, self.Tsparse, Hi_, self.nconv_edge,self.Sshape, self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout
)
Hp = tf.concat([Hi_, P], 1)
Hi = self.activation(Hp)
self.hidden_layers.append(Hi)
def create_model(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32, (), name='nb_node')
self.nb_edge = tf.placeholder(tf.int32, (), name='nb_edge')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
self.dropout_p_H = tf.placeholder(tf.float32, (), name='dropout_prob_H')
self.dropout_p_node = tf.placeholder(tf.float32, (), name='dropout_prob_N')
self.dropout_p_edge = tf.placeholder(tf.float32, (), name='dropout_prob_edges')
self.dropout_p_edge_feat = tf.placeholder(tf.float32, (), name='dropout_prob_edgefeat')
self.S = tf.placeholder(tf.float32, name='S')
self.Ssparse = tf.placeholder(tf.int64, name='Ssparse') # indices
self.Sshape = tf.placeholder(tf.int64, name='Sshape') # indices
self.T = tf.placeholder(tf.float32, [None, None], name='T')
self.Tsparse = tf.placeholder(tf.int64, name='Tsparse')
self.F = tf.placeholder(tf.float32, [None, None], name='F')
std_dev_in = float(1.0 / float(self.node_dim))
self.Wnode_layers = []
self.Bnode_layers = []
self.Wed_layers = []
self.Bed_layers = []
self.zed_layers = []
self.Wedge_mlp_layers = []
# Should Project edge as well ...
self.train_var = []
# if self.node_indim!=self.node_dim:
# Wnl0 = tf.Variable(tf.random_uniform((self.node_dim, self.node_indim),
# -1.0 / math.sqrt(self.node_dim),
# 1.0 / math.sqrt(self.node_dim)),name='Wnl0',dtype=tf.float32)
#
self.Wnl0 = tf.Variable(tf.eye(self.node_dim), name='Wnl0', dtype=tf.float32, trainable=self.train_Wn0)
self.Bnl0 = tf.Variable(tf.zeros([self.node_dim]), name='Bnl0', dtype=tf.float32)
if self.init_fixed: #For testing Purposes
self.Wel0 = tf.Variable(100 * tf.ones([int(self.nconv_edge), int(self.edge_dim)]), name='Wel0',
dtype=tf.float32)
# self.Wel0 =tf.Variable(tf.random_normal([int(self.nconv_edge),int(self.edge_dim)],mean=0.0,stddev=1.0), dtype=np.float32, name='Wel0')
self.Wel0 = init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel0')
self.Bel0 = tf.Variable(0.01 * tf.ones([self.nconv_edge]), name='Bel0', dtype=tf.float32)
traceln(' -- Wel0', self.Wel0.get_shape())
self.train_var.extend([self.Wnl0, self.Bnl0])
self.train_var.append(self.Wel0)
if self.stack_instead_add:
self.create_model_stack_convolutions()
else:
self.create_model_sum_convolutions()
self.logits = tf.add(tf.matmul(self.hidden_layers[-1], self.W_classif), self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
# Global L2 Regulization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.predict_proba = tf.nn.softmax(self.logits)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.gv_Gn = []
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver(max_to_keep=0)
traceln(' -- Number of Params: ', self.get_nb_params())
def create_model_old(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.nb_edge = tf.placeholder(tf.int32, (), name='nb_edge')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
#self.EA_input = tf.placeholder(tf.float32, name='EA_input')
#self.NA_input = tf.placeholder(tf.float32, name='NA_input')
self.dropout_p_H = tf.placeholder(tf.float32,(), name='dropout_prob_H')
self.dropout_p_node = tf.placeholder(tf.float32, (), name='dropout_prob_N')
self.dropout_p_edge = tf.placeholder(tf.float32, (), name='dropout_prob_edges')
self.dropout_p_edge_feat = tf.placeholder(tf.float32, (), name='dropout_prob_edgefeat')
self.S = tf.placeholder(tf.float32, name='S')
self.Ssparse = tf.placeholder(tf.int64, name='Ssparse') #indices
self.Sshape = tf.placeholder(tf.int64, name='Sshape') #indices
self.T = tf.placeholder(tf.float32,[None,None], name='T')
self.Tsparse = tf.placeholder(tf.int64, name='Tsparse')
#self.S_indice = tf.placeholder(tf.in, [None, None], name='S')
self.F = tf.placeholder(tf.float32,[None,None], name='F')
#self.NA_indegree = tf.placeholder(tf.float32, name='NA_indegree')
std_dev_in=float(1.0/ float(self.node_dim))
self.Wnode_layers=[]
self.Bnode_layers=[]
self.Wed_layers=[]
self.Bed_layers=[]
#REFACT1 self.zed_layers = []
#REFACT1 self.Wedge_mlp_layers=[]
        #Should we project the edges as well ...
self.train_var=[]
#if self.node_indim!=self.node_dim:
# Wnl0 = tf.Variable(tf.random_uniform((self.node_dim, self.node_indim),
# -1.0 / math.sqrt(self.node_dim),
# 1.0 / math.sqrt(self.node_dim)),name='Wnl0',dtype=tf.float32)
#else:
self.Wnl0 = tf.Variable(tf.eye(self.node_dim),name='Wnl0',dtype=tf.float32,trainable=self.train_Wn0)
self.Bnl0 = tf.Variable(tf.zeros([self.node_dim]), name='Bnl0',dtype=tf.float32)
#self.Wel0 =tf.Variable(tf.random_normal([int(self.nconv_edge),int(self.edge_dim)],mean=0.0,stddev=1.0), dtype=np.float32, name='Wel0')
if self.init_fixed:
self.Wel0 = tf.Variable(100*tf.ones([int(self.nconv_edge),int(self.edge_dim)]), name='Wel0',dtype=tf.float32)
elif self.use_edge_mlp:
self.Wel0 = init_glorot([int(self.nconv_edge), int(self.edge_mlp_dim)], name='Wel0')
else:
self.Wel0 = init_glorot([int(self.nconv_edge),int(self.edge_dim)],name='Wel0')
self.Bel0 = tf.Variable(0.01*tf.ones([self.nconv_edge]), name='Bel0' , dtype=tf.float32)
#RF self.zel0 = tf.Variable(tf.ones([self.nconv_edge]), name='zel0' , dtype=tf.float32)
#RF self.zH = tf.Variable(tf.ones([self.num_layers]),name='zH',dtype=tf.float32)
traceln(' -- Wel0',self.Wel0.get_shape())
self.train_var.extend([self.Wnl0,self.Bnl0])
self.train_var.append(self.Wel0)
#Parameter for convolving the logits
''' REFACT1
if self.logit_convolve:
#self.Wel_logits = init_glorot([int(self.nconv_edge),int(self.edge_dim)],name='Wel_logit')
#self.Belg = tf.Variable(tf.zeros( [int(self.nconv_edge)]), name='Belogit' , dtype=tf.float32)
self.Wel_logits = tf.Variable(tf.zeros([int(1),int(self.edge_dim)]), name='Wlogit0',dtype=tf.float32,trainable=False)
self.Belg = tf.Variable(tf.ones( [int(1)]), name='Belogit' , dtype=tf.float32)
#self.logits_Transition = 1.0*tf.Variable(tf.ones([int(self.n_classes) , int(self.n_classes)]), name='logit_Transition')
self.logits_Transition=init_glorot([int(self.n_classes), int(self.n_classes)], name='Wel_')
self.Wmlp_edge_0= init_glorot([int(self.edge_dim), int(self.edge_mlp_dim)], name='Wedge_mlp')
self.Bmlp_edge_0= tf.Variable(tf.ones([self.edge_mlp_dim]),name='Wedge_mlp',dtype=tf.float32)
'''
#self.Wed_layers.append(Wel0)
for i in range(self.num_layers-1):
if self.stack_instead_add:
if i==0:
Wnli = tf.Variable(
tf.random_uniform((self.node_dim * self.nconv_edge + self.node_dim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
else:
Wnli =tf.Variable(tf.random_uniform( (self.node_indim*self.nconv_edge+self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)),name='Wnl',dtype=tf.float32)
traceln(' -- Wnli shape',Wnli.get_shape())
elif self.use_conv_weighted_avg:
Wnli = tf.Variable(
tf.random_uniform((self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl', dtype=tf.float32)
#Wnli = tf.eye(self.node_dim,dtype=tf.float32)
traceln(' -- Wnli shape', Wnli.get_shape())
else:
if i==0:
Wnli = tf.Variable(tf.random_uniform((2 * self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)), name='Wnl',
dtype=tf.float32)
else:
Wnli =tf.Variable(tf.random_uniform( (2*self.node_indim, self.node_indim),
-1.0 / math.sqrt(self.node_indim),
1.0 / math.sqrt(self.node_indim)),name='Wnl',dtype=tf.float32)
Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl'+str(i),dtype=tf.float32)
#Weli = tf.Variable(tf.ones([int(self.nconv_edge),int(self.edge_dim)],dtype=tf.float32))
if self.use_edge_mlp:
#self.Wel0 = init_glorot([int(self.nconv_edge), int(self.edge_mlp_dim)], name='Wel0')
Weli = init_glorot([int(self.nconv_edge), int(self.edge_mlp_dim)], name='Wel_')
Beli = tf.Variable(0.01 * tf.ones([self.nconv_edge]), name='Bel' + str(i), dtype=tf.float32)
#RF Wmlp_edge_i = init_glorot([int(self.edge_dim), int(self.edge_mlp_dim)], name='Wedge_mlp'+str(i))
#RF Bmlp_edge_i = tf.Variable(tf.ones([self.edge_mlp_dim]), name='Bedge_mlp'+str(i), dtype=tf.float32)
#RF self.Wedge_mlp_layers.append(Wmlp_edge_i)
else:
Weli= init_glorot([int(self.nconv_edge), int(self.edge_dim)], name='Wel_')
#Weli = tf.Variable(tf.random_normal([int(self.nconv_edge), int(self.edge_dim)], mean=0.0, stddev=1.0),
# dtype=np.float32, name='Wel_')
Beli = tf.Variable(0.01*tf.ones([self.nconv_edge]), name='Bel'+str(i),dtype=tf.float32)
#RF Wmlp_edge_i = init_glorot([int(self.edge_dim), int(self.edge_mlp_dim)], name='Wedge_mlp' + str(i))
#RF Bmlp_edge_i = tf.Variable(tf.ones([self.edge_mlp_dim]), name='Bedge_mlp' + str(i), dtype=tf.float32)
#RF self.Wedge_mlp_layers.append(Wmlp_edge_i)
#zeli = tf.Variable(tf.ones([self.nconv_edge]),name='zel'+str(i),dtype=tf.float32)
self.Wnode_layers.append(Wnli)
self.Bnode_layers.append(Bnli)
self.Wed_layers.append (Weli)
self.Bed_layers.append(Beli)
#self.zed_layers.append(zeli)
self.train_var.extend((self.Wnode_layers))
self.train_var.extend((self.Wed_layers))
self.Hnode_layers=[]
        #TODO Do we project the first layer or not ?
# Initialize the weights and biases for a simple one full connected network
if self.stack_instead_add:
self.W_classif = tf.Variable(tf.random_uniform((self.node_indim*self.nconv_edge+self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
elif self.use_conv_weighted_avg:
self.W_classif = tf.Variable(tf.random_uniform((self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif", dtype=np.float32)
else:
self.W_classif = tf.Variable(tf.random_uniform((2*self.node_indim, self.n_classes),
-1.0 / math.sqrt(self.node_dim),
1.0 / math.sqrt(self.node_dim)),
name="W_classif",dtype=np.float32)
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
self.train_var.append((self.W_classif))
self.train_var.append((self.B_classif))
#Use for true add
#I = tf.eye(self.nb_node)
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node], dtype=tf.float32), 1 - self.dropout_p_node)
self.ND = tf.diag(self.node_dropout_ind)
edge_dropout = self.dropout_rate_edge> 0.0 or self.dropout_rate_edge_feat > 0.0
traceln(' -- Edge Dropout',edge_dropout, self.dropout_rate_edge,self.dropout_rate_edge_feat)
if self.num_layers==1:
self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0))
self.hidden_layers = [self.H]
traceln("H shape",self.H.get_shape())
P = self.fastconvolve(self.Wel0,self.Bel0,self.F,self.Ssparse,self.Tsparse,self.H,self.nconv_edge,self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat,stack=self.stack_instead_add,use_dropout=edge_dropout)
Hp = tf.concat([self.H, P], 1)
#Hp= P+self.H
Hi=self.activation(Hp)
#Hi_shape = Hi.get_shape()
#traceln(Hi_shape)
self.hidden_layers.append(Hi)
elif self.num_layers>1:
if self.dropout_rate_node>0.0:
H0 = self.activation(tf.matmul(self.ND,tf.add(tf.matmul(self.node_input, self.Wnl0), self.Bnl0)))
else:
H0 = self.activation(tf.add(tf.matmul(self.node_input,self.Wnl0),self.Bnl0))
self.Hnode_layers.append(H0)
#TODO Default to fast convolve but we change update configs, train and test flags
P = self.fastconvolve(self.Wel0,self.Bel0, self.F, self.Ssparse, self.Tsparse, H0, self.nconv_edge, self.Sshape,self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout,
)
#RF zwe=self.zel0,
#RF use_weighted_average=self.use_conv_weighted_avg,
#RF use_edge_mlp=self.use_edge_mlp,
#RFWedge_mlp=self.Wmlp_edge_0,
#RF Bedge_mlp=self.Bmlp_edge_0)
if self.use_conv_weighted_avg:
Hp = self.zH[0] * H0 + P
else:
Hp = tf.concat([H0, P], 1)
#TODO add activation Here.
#self.hidden_layers = [self.activation(Hp)]
self.hidden_layers = [Hp]
for i in range(self.num_layers-1):
if self.dropout_rate_H > 0.0:
Hi_ = tf.nn.dropout(tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i], 1-self.dropout_p_H)
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i]) + self.Bnode_layers[i]
if self.residual_connection:
Hi_= tf.add(Hi_,self.Hnode_layers[-1])
self.Hnode_layers.append(Hi_)
# traceln(' -- Hi_shape',Hi_.get_shape())
# traceln(' -- Hi prevous shape',self.hidden_layers[-1].get_shape())
P = self.fastconvolve(self.Wed_layers[i],self.Bed_layers[i], self.F, self.Ssparse, self.Tsparse, Hi_, self.nconv_edge,self.Sshape, self.nb_edge,
self.dropout_p_edge,self.dropout_p_edge_feat, stack=self.stack_instead_add, use_dropout=edge_dropout,
)
# zwe=self.zed_layers[i],
# use_weighted_average=self.use_conv_weighted_avg,
# use_edge_mlp=self.use_edge_mlp,
# Wedge_mlp=self.Wedge_mlp_layers[i],
#RF Bedge_mlp=self.Bmlp_edge_0)
if self.use_conv_weighted_avg:
Hp = self.zH[i+1]* Hi_ + P
else:
Hp = tf.concat([Hi_, P], 1)
Hi = self.activation(Hp)
self.hidden_layers.append(Hi)
self.logits =tf.add(tf.matmul(self.hidden_layers[-1],self.W_classif),self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
        # Global L2 regularization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.predict_proba = tf.nn.softmax(self.logits)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.gv_Gn=[]
#TODO Experiment with gradient noise
#if self.stack_instead_add:
# for grad, var in self.grads_and_vars:
# traceln(grad,var)
# if grad is not None:
# self.gv_Gn.append( ( tf.add(grad, tf.random_normal(tf.shape(grad), stddev=0.00001)),var) )
        #self.gv_Gn = [(tf.add(grad, tf.random_normal(tf.shape(grad), stddev=0.00001)), val) for grad, val in self.grads_and_vars if grad is not None]
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver= tf.train.Saver(max_to_keep=0)
traceln(' -- Number of Params: ', self.get_nb_params())
def save_model(self, session, model_filename):
traceln("Saving Model")
save_path = self.saver.save(session, model_filename)
def restore_model(self, session, model_filename):
self.saver.restore(session, model_filename)
traceln("Model restored.")
def get_Wedge(self,session):
'''
Return the parameters for the Edge Convolutions
:param session:
:return:
'''
if self.num_layers>1:
L0=session.run([self.Wel0,self.Wed_layers])
We0=L0[0]
list_we=[We0]
for we in L0[1]:
list_we.append(we)
return list_we
else:
L0=session.run([self.Wel0])
We0=L0[0]
list_we=[We0]
return list_we
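        # Note (descriptive): the returned list is [Wel0, Wel_1, ..., Wel_{num_layers-2}],
        # i.e. the first-layer edge weights followed by the per-layer edge convolution
        # weights, mainly useful for inspecting or plotting the learned edge filters.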
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, ie sgd step for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of steps to perform sgd for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln(' -- Train',X.shape,EA.shape)
#traceln(' -- DropoutEdges',self.dropout_rate_edge)
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
self.F: graph.F,
self.y_input: graph.Y,
self.dropout_p_H: self.dropout_rate_H,
self.dropout_p_node: self.dropout_rate_node,
self.dropout_p_edge: self.dropout_rate_edge,
self.dropout_p_edge_feat: self.dropout_rate_edge_feat,
#self.NA_indegree:graph.NA_indegree
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Test return the loss and accuracy for the graph passed as argument
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
self.F: graph.F,
self.y_input: graph.Y,
self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_edge: 0.0,
self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
# fast_gcn.S: np.asarray(graph.S.todense()).squeeze(),
# fast_gcn.Ssparse: np.vstack([graph.S.row,graph.S.col]),
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
# fast_gcn.T: np.asarray(graph.T.todense()).squeeze(),
self.F: graph.F,
self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_edge: 0.0,
self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops = session.run([self.pred], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
print(str(Ops))
return Ops[0]
def prediction_prob(self,session,graph,verbose=True):
'''
        Return the predicted class probabilities
:param session:
:param graph:
:param verbose:
:return:
'''
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
# fast_gcn.S: np.asarray(graph.S.todense()).squeeze(),
# fast_gcn.Ssparse: np.vstack([graph.S.row,graph.S.col]),
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
# fast_gcn.T: np.asarray(graph.T.todense()).squeeze(),
self.F: graph.F,
self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_edge: 0.0,
self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops = session.run([self.predict_proba], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
return Ops[0]
def train_All_lG(self,session,graph_train,graph_val, max_iter, eval_iter = 10, patience = 7, graph_test = None, save_model_path = None):
'''
Merge all the graph and train on them
:param session:
:param graph_train: the list of graph to train on
:param graph_val: the list of graph used for validation
:param max_iter: maximum number of epochs
:param eval_iter: evaluate every eval_iter
:param patience: stopped training if accuracy is not improved on the validation set after patience_value
:param graph_test: Optional. If a test set is provided, then accuracy on the test set is reported
:param save_model_path: checkpoints filename to save the model.
:return: A Dictionary with training accuracies, validations accuracies and test accuracies if any, and the Wedge parameters
'''
best_val_acc = 0.0
wait = 0
stop_training = False
stopped_iter = max_iter
train_accuracies = []
validation_accuracies = []
test_accuracies = []
conf_mat = []
start_monitoring_val_acc = False
# Not Efficient to compute this for
merged_graph = gcn_datasets.GCNDataset.merge_allgraph(graph_train)
self.train(session, merged_graph, n_iter=1)
for i in range(max_iter):
if stop_training:
break
if i % eval_iter == 0:
traceln('\n -- Epoch', i)
_, tr_acc = self.test_lG(session, graph_train, verbose=False)
traceln(' -- Train Acc', '%.4f' % tr_acc)
train_accuracies.append(tr_acc)
_, node_acc = self.test_lG(session, graph_val, verbose=False)
traceln(' -- Valid Acc', '%.4f' % node_acc)
validation_accuracies.append(node_acc)
if save_model_path:
save_path = self.saver.save(session, save_model_path, global_step=i)
if graph_test:
_, test_acc = self.test_lG(session, graph_test, verbose=False)
traceln(' -- Test Acc', '%.4f' % test_acc)
test_accuracies.append(test_acc)
# Ypred = self.predict_lG(session, graph_test,verbose=False)
# Y_true_flat = []
# Ypred_flat = []
# for graph, ypred in zip(graph_test, Ypred):
# ytrue = np.argmax(graph.Y, axis=1)
# Y_true_flat.extend(ytrue)
# Ypred_flat.extend(ypred)
# cm = sklearn.metrics.confusion_matrix(Y_true_flat, Ypred_flat)
# conf_mat.append(cm)
# TODO min_delta
# if tr_acc>0.99:
# start_monitoring_val_acc=True
if node_acc > best_val_acc:
best_val_acc = node_acc
wait = 0
else:
if wait >= patience:
stopped_iter = i
stop_training = True
wait += 1
else:
self.train(session, merged_graph, n_iter=1)
# Final Save
# if save_model_path:
# save_path = self.saver.save(session, save_model_path, global_step=i)
# TODO Add the final step
mean_acc = []
traceln(' -- Stopped Model Training after ', stopped_iter)
traceln(' -- Val Accuracies ', validation_accuracies)
traceln(' -- Final Training Accuracy')
_, node_train_acc = self.test_lG(session, graph_train)
traceln(' -- Train Mean Accuracy', '%.4f' % node_train_acc)
traceln(' -- Final Valid Acc')
self.test_lG(session, graph_val)
R = {}
R['train_acc'] = train_accuracies
R['val_acc'] = validation_accuracies
R['test_acc'] = test_accuracies
R['stopped_iter'] = stopped_iter
R['confusion_matrix'] = conf_mat
# R['W_edge'] =self.get_Wedge(session)
if graph_test:
_, final_test_acc = self.test_lG(session, graph_test)
traceln(' -- Final Test Acc', '%.4f' % final_test_acc)
R['final_test_acc'] = final_test_acc
return R
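    # For illustration (a sketch; `session` is an open tf.Session and lG_trn, lG_val,
    # lG_tst are lists of GCNDataset-like graphs):
    #
    #     R = model.train_All_lG(session, lG_trn, lG_val, max_iter=500,
    #                            eval_iter=10, patience=7, graph_test=lG_tst)
    #     traceln(R['val_acc'], R['final_test_acc'])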
class GraphConvNet(MultiGraphNN):
'''
A Deep Standard GCN model for a graph list
'''
def __init__(self,node_dim,nb_classes,num_layers=1,learning_rate=0.1,mu=0.1,node_indim=-1,
dropout_rate=0.0,dropout_mode=0):
self.node_dim=node_dim
self.n_classes=nb_classes
self.num_layers=num_layers
self.learning_rate=learning_rate
self.activation=tf.nn.relu
self.mu=mu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.convolve_last=False
self.dropout_rate=dropout_rate
        # 0: no dropout; 1: node dropout at the input; 2: standard dropout for each layer
# check logit layer
self.dropout_mode=dropout_mode
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
def create_model(self):
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
self.NA_input = tf.placeholder(tf.float32, name='NA_input') #Normalized Adjacency Matrix Here
self.dropout_p = tf.placeholder(tf.float32,(), name='dropout_prob')
#std_dev_in=float(1.0/ float(self.node_dim))
self.Wnode_layers=[]
self.Bnode_layers=[]
std_dev_input=float(1.0/ float(self.node_dim))
std_dev_indim=float(1.0/ float(self.node_indim))
if self.node_indim!=self.node_dim:
self.Wnode = init_glorot((self.node_dim,self.node_indim),name='Wn0')
#self.Wnode = init_normal((self.node_dim, self.node_indim),std_dev_input,name='Wn0')
else:
self.Wnode = tf.Variable(tf.eye(self.node_dim),name='Wn0',dtype=tf.float32)
self.Bnode = tf.Variable(tf.zeros([self.node_indim]), name='Bnode',dtype=tf.float32)
for i in range(self.num_layers-1):
Wnli =init_glorot((2*self.node_indim, self.node_indim),name='Wnl'+str(i))
#Wnli = init_normal((self.node_indim, self.node_indim),std_dev_indim, name='Wnl' + str(i))
self.Wnode_layers.append(Wnli)
            #The GCN does not seem to use a bias term
#Bnli = tf.Variable(tf.zeros([self.node_indim]), name='Bnl'+str(i),dtype=tf.float32)
#self.Bnode_layers.append(Bnli)
self.W_classif = init_glorot((2*self.node_indim, self.n_classes),name="W_classif")
self.B_classif = tf.Variable(tf.zeros([self.n_classes]), name='B_classif',dtype=np.float32)
#Input Layer
        #Check the self-loop. It is included in the normalized adjacency matrix
        #Check residual connections as well for deeper models
#add dropout_placeholder ... to differentiate train and test
#x = tf.nn.dropout(x, 1 - self.dropout)
#Dropout some nodes at the input of the graph ?
#Should I dropout in upper layers as well ?
#This way this forces the net to infer the node labels from its neighbors only
        #Here I dropout the features, but not the edges ..
self.node_dropout_ind = tf.nn.dropout(tf.ones([self.nb_node],dtype=tf.float32),1-self.dropout_p)
self.ND = tf.diag(self.node_dropout_ind)
if self.dropout_mode==1:
#self.H = self.activation(tf.matmul(self.ND,tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode)))
P0 = self.activation(tf.matmul(self.ND, tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode)))
            self.hidden_layers = [P0]
else:
H0 =tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode)
P0 =tf.matmul(self.NA_input, H0) # we should forget the self loop
H0_ = self.activation(tf.concat([H0, P0], 1))
self.hidden_layers=[H0_]
#self.H = self.activation(tf.add(tf.matmul(self.node_input, self.Wnode), self.Bnode))
#self.hidden_layers = [self.H]
for i in range(self.num_layers-1):
if self.dropout_mode==2:
Hp = tf.nn.dropout(self.hidden_layers[-1],1-self.dropout_p)
Hi_ = tf.matmul(Hp, self.Wnode_layers[i])
else:
Hi_ = tf.matmul(self.hidden_layers[-1], self.Wnode_layers[i])
P =tf.matmul(self.NA_input, Hi_) #we should forget the self loop
#Hp = tf.concat([H0, P], 1)
Hi = self.activation(tf.concat([Hi_,P],1))
self.hidden_layers.append(Hi)
        #This drops out the logits as in GCN
if self.dropout_mode==2:
Hp = tf.nn.dropout(self.hidden_layers[-1], 1 - self.dropout_p)
self.hidden_layers.append(Hp)
if self.convolve_last is True:
logit_0 = tf.add(tf.matmul(self.hidden_layers[-1], self.W_classif), self.B_classif)
self.logits = tf.matmul(self.NA_input,logit_0) #No activation function here
else:
self.logits =tf.add(tf.matmul(self.hidden_layers[-1],self.W_classif),self.B_classif)
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
        #Global L2 regularization
self.loss = tf.reduce_mean(cross_entropy_source) + self.mu * tf.nn.l2_loss(self.W_classif)
self.correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(self.logits), 1), tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
traceln(' -- Number of Params: ', self.get_nb_params())
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
def train(self,session,g,n_iter=1,verbose=False):
#TrainEvalSet Here
for i in range(n_iter):
feed_batch={
self.nb_node:g.X.shape[0],
self.node_input:g.X,
self.y_input:g.Y,
self.NA_input:g.NA,
self.dropout_p:self.dropout_rate
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,g,verbose=True):
#TrainEvalSet Here
feed_batch={
self.nb_node:g.X.shape[0],
self.node_input:g.X,
self.y_input:g.Y,
self.NA_input:g.NA,
self.dropout_p: 0.0
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
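    # A minimal usage sketch (assuming `g` exposes X, Y and a precomputed normalized
    # adjacency matrix NA, as required by the feed dictionaries above):
    #
    #     gcn = GraphConvNet(node_dim, nb_classes, num_layers=2, dropout_rate=0.2)
    #     gcn.create_model()
    #     with tf.Session() as session:
    #         session.run(gcn.init)
    #         gcn.train(session, g, n_iter=10)
    #         acc = gcn.test(session, g)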
class EdgeLogit(Logit):
'''
Logistic Regression for MultiGraph
'''
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, ie sgd step for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of steps to perform sgd for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln(' -- Train',X.shape,EA.shape)
nb_edge =graph.E.shape[0]
half_edge =nb_edge/2
feed_batch = {
self.nb_node: graph.EC.shape[0], #here we pass the number of edges
self.node_input: graph.EC,
self.y_input: graph.Yedge,
#self.nb_node: half_edge, #here we pass the number of edges
#self.node_input: graph.F[:half_edge],
#self.y_input: graph.Yedge[:half_edge],
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Test return the loss and accuracy for the graph passed as argument
:param session:
:param graph:
:param verbose:
:return:
'''
nb_edge = graph.E.shape[0]
half_edge = nb_edge / 2
feed_batch = {
self.nb_node: graph.EC.shape[0],
self.node_input: graph.EC,
self.y_input: graph.Yedge,
#self.nb_node: half_edge, # here we pass the number of edges
#self.node_input: graph.F[:half_edge],
#self.y_input: graph.Yedge[:half_edge],
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
nb_edge = graph.E.shape[0]
half_edge = nb_edge / 2
feed_batch = {
self.nb_node: graph.EC.shape[0],
self.node_input: graph.EC,
#self.nb_node: half_edge, # here we pass the number of edges
#self.node_input: graph.F[:half_edge],
#self.y_input: graph.Yedge[:, half_edge],
}
Ops = session.run([self.pred], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
return Ops[0]
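        # Note (descriptive): EdgeLogit reuses the node-level Logit graph unchanged; it
        # simply feeds edge features (graph.EC) and edge labels (graph.Yedge) in place of
        # node features and labels, which is why nb_node is bound to the number of edges
        # in the feed dictionaries above.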
#TODO Benchmark on Snake, GCN, ECN, graphAttNet vs Cora
#TODO Refactor Code
#TODO Add L2 Regularization
#TODO Stack or Add Convolution -> Reduce the size
# Force the attention to preserve the node information i.e alpha'= 0.8 I +0.2 alpha
# Force doing attention only for the logit ?
# with a two layer
# Branching factor --> Attention
# Logit Layer and Attention
# There is one difference with ECN: the features for the edges are dynamically calculated,
# whereas for GAT they are conditioned on the current node features
# 0.88
# Do a dot product attention or something different ...
# Change Initialization of the attention vector
# with Attn vector equal to [x;0] the attention keeps the source features and does not propagate ...
# Should reduce Nb of parameters
# This notion of edges is completely arbitrary
# We could look at all nodes in the graph to see whether there are some dependencies; an interesting experiment
class GraphAttNet(MultiGraphNN):
'''
Graph Attention Network
'''
# Variable ignored by the set_learning_options
_setter_variables = {
"node_dim": True, "edge_dim": True, "nb_class": True,
"num_layers": True, "lr": True,
"node_indim": True, "nb_attention": True,
"nb_iter": True, "ratio_train_val": True}
def __init__(self,node_dim,nb_classes,num_layers=1,learning_rate=0.1,node_indim=-1,nb_attention=3
):
self.node_dim=node_dim
self.n_classes=nb_classes
self.num_layers=num_layers
self.learning_rate=learning_rate
self.activation=tf.nn.elu
#self.activation=tf.nn.relu
self.optalg = tf.train.AdamOptimizer(self.learning_rate)
self.stack_instead_add=False
self.residual_connection=False#deprecated
self.mu=0.0
self.dropout_rate_node = 0.0
self.dropout_rate_attention = 0.0
self.nb_attention=nb_attention
self.distinguish_node_from_neighbor=False
self.original_model=False
self.attn_type=0
self.dense_model=False
if node_indim==-1:
self.node_indim=self.node_dim
else:
self.node_indim=node_indim
#TODO GENERIC Could be move in MultigraphNN
def set_learning_options(self,dict_model_config):
"""
        Set all learning options that are not directly accessible from the constructor
:param kwargs:
:return:
"""
traceln(dict_model_config)
for attrname,val in dict_model_config.items():
            #We treat the activation function differently as we cannot pickle/serialize python functions
if attrname=='activation_name':
if val=='relu':
self.activation=tf.nn.relu
elif val=='tanh':
self.activation=tf.nn.tanh
else:
raise Exception('Invalid Activation Function')
if attrname=='stack_instead_add' or attrname=='stack_convolutions':
self.stack_instead_add=val
if attrname not in self._setter_variables:
try:
traceln(' -- set',attrname,val)
setattr(self,attrname,val)
except AttributeError:
warnings.warn("Ignored options for ECN"+attrname+':'+val)
def dense_graph_attention_layer(self,H,W,A,nb_node,dropout_attention,dropout_node,use_dropout=False):
'''
Implement a dense attention layer where every node is connected to everybody
:param A:
:param H:
:param W:
:param dropout_attention:
:param dropout_node:
:param use_dropout:
:return:
'''
        '''
        For every pair (i,j) the attention score is a.Hi + b.Hj.
        The per-node scores are tiled so that the first column contains the score of
        node 1, the second column the score of node 2, etc.
        The diagonal may be a special case.
        '''
with tf.name_scope('graph_att_dense_attn'):
P = tf.matmul(H, W)
Aij_forward = tf.expand_dims(A[0], 0) # attention vector for forward edge and backward edge
Aij_backward = tf.expand_dims(A[1], 0) # Here we assume it is the same on contrary to the paper
# Compute the attention weight for target node, ie a . Whj if j is the target node
att_target_node = tf.matmul(P, Aij_backward,transpose_b=True)
# Compute the attention weight for the source node, ie a . Whi if j is the target node
att_source_node = tf.matmul(P, Aij_forward, transpose_b=True)
Asrc_vect = tf.tile(att_source_node,[nb_node,1])
Asrc = tf.reshape(Asrc_vect,[nb_node,nb_node])
Atgt_vect = tf.tile(att_target_node, [nb_node,1])
Atgt = tf.reshape(Atgt_vect, [nb_node, nb_node])
Att = tf.nn.leaky_relu(Asrc+Atgt)
#Att = tf.nn.leaky_relu(Asrc)
alphas = tf.nn.softmax(Att)
# dropout is done after the softmax
if use_dropout:
traceln(' -- ... using dropout for attention layer')
alphasD = tf.nn.dropout(alphas, 1.0 - dropout_attention)
P_D = tf.nn.dropout(P, 1.0 - dropout_node)
alphasP = tf.matmul(alphasD, P_D)
return alphasD, alphasP
else:
# We compute the features given by the attentive neighborhood
alphasP = tf.matmul(alphas, P)
return alphas, alphasP
#TODO Change the transpose of the A parameter
def simple_graph_attention_layer(self,H,W,A,S,T,Adjind,Sshape,nb_edge,
dropout_attention,dropout_node,
use_dropout=False,add_self_loop=False,attn_type=0):
'''
:param H: The current node feature
:param W: The node projection for this layer
:param A: The attention weight vector: a
:param S: The source edge matrix indices
:param T: The target edge matrix indices
        :param Adjind: The adjacency matrix indices
:param Sshape: Shape of S
:param nb_edge: Number of edge
:param dropout_attention: dropout_rate for the attention
        :param use_dropout: whether to use dropout
        :param add_self_loop: whether to add edge (i,i)
:return: alphas,nH
        where alphas is the attention-based adjacency matrix; alpha[i,j] corresponds to alpha_ij and
        nH corresponds to the new features for this layer, ie alphas.(H.W)
'''
with tf.name_scope('graph_att_net_attn'):
            # This has shape (nb_node,in_dim) and corresponds to the projection W.h in the paper
P=tf.matmul(H,W)
#traceln(P.get_shape())
#This has shape #shape,(nb_edge,nb_node)
#This sparse tensor contains target nodes for edges.
#The indices are [edge_idx,node_target_index]
Tr = tf.SparseTensor(indices=T, values=tf.ones([nb_edge], dtype=tf.float32),
dense_shape=[Sshape[1], Sshape[0]])
Tr = tf.sparse_reorder(Tr) # reorder so that sparse operations work correctly
# This tensor has shape(nb_edge,in_dim) and contains the node target projection, ie Wh
TP = tf.sparse_tensor_dense_matmul(Tr, P,name='TP')
# This has shape #shape,(nb_node,nb_edge)
# This sparse tensor contains source nodes for edges.
# The indices are [node_source_index,edge_idx]
SD = tf.SparseTensor(indices=S, values=tf.ones([nb_edge],dtype=tf.float32), dense_shape=Sshape)
SD = tf.sparse_reorder(SD) #shape,(nb_edge,nb_node)
# This tensor has shape(nb_edge,in_dim) and contains the node source projection, ie Wh
SP = tf.sparse_tensor_dense_matmul(tf.sparse_transpose(SD), P,name='SP') #shape(nb_edge,in_dim)
#traceln(' -- SP', SP.get_shape())
#Deprecated
if attn_type==1:
                #Multiplicative attention module
Aij_forward = A # attention vector for forward edge and backward edge
Aij_backward = A # Here we assume it is the same on contrary to the paper
# Compute the attention weight for target node, ie a . Whj if j is the target node
att_target_node = tf.multiply(TP, Aij_forward[0])
# Compute the attention weight for the source node, ie a . Whi if j is the target node
att_source_node = tf.multiply(SP, Aij_backward[0])
# The attention values for the edge ij is the sum of attention of node i and j
# Attn( node_i, node_j) = Sum_k (a_k)^2 Hik Hjk Is this what we want ?
att_source_target_node = tf.reduce_sum( tf.multiply(att_source_node,att_target_node),axis=1)
attn_values = tf.nn.leaky_relu( att_source_target_node)
#
elif attn_type==2:
#Inspired by learning to rank approach on w(x+-x-)
# Attn( node_i, node_j) = Sum_k (a_k) (Hik- Hjk) Is this what we want ?
att_source_target_node = tf.reduce_sum( tf.multiply(SP-TP,A[0]),axis=1)
attn_values = tf.nn.leaky_relu( att_source_target_node)
else:
Aij_forward=tf.expand_dims(A[0],0) # attention vector for forward edge and backward edge
Aij_backward=tf.expand_dims(A[1],0) # Here we assume it is the same on contrary to the paper
# Compute the attention weight for target node, ie a . Whj if j is the target node
att_target_node =tf.matmul(TP,Aij_backward,transpose_b=True)
# Compute the attention weight for the source node, ie a . Whi if j is the target node
att_source_node = tf.matmul(SP,Aij_forward,transpose_b=True)
# The attention values for the edge ij is the sum of attention of node i and j
attn_values = tf.nn.leaky_relu(tf.squeeze(att_target_node) + tf.squeeze(att_source_node))
# From that we build a sparse adjacency matrix containing the correct values
# which we then feed to a sparse softmax
AttAdj = tf.SparseTensor(indices=Adjind, values=attn_values, dense_shape=[Sshape[0], Sshape[0]])
AttAdj = tf.sparse_reorder(AttAdj)
            #Not very efficient to do this, we should add the self-loop in the preprocessing
if add_self_loop:
node_indices=tf.range(Sshape[0])
                #Sparse identity
Aij_forward = tf.expand_dims(A[0], 0)
id_indices = tf.stack([node_indices, node_indices], axis=1)
val =tf.squeeze(tf.matmul(P,Aij_forward,transpose_b=True))
spI = tf.SparseTensor(indices=id_indices,values=2.0*val,dense_shape=[Sshape[0], Sshape[0]])
AttAdj_I = tf.sparse_add(AttAdj,spI)
alphas = tf.sparse_softmax(AttAdj_I)
else:
alphas = tf.sparse_softmax(AttAdj)
#dropout is done after the softmax
if use_dropout:
traceln(' -- ... using dropout for attention layer')
alphasD = tf.SparseTensor(indices=alphas.indices,values=tf.nn.dropout(alphas.values, 1.0 - dropout_attention),dense_shape=alphas.dense_shape)
P_D =tf.nn.dropout(P,1.0-dropout_node)
alphasP = tf.sparse_tensor_dense_matmul(alphasD, P_D)
return alphasD, alphasP
else:
#We compute the features given by the attentive neighborhood
alphasP = tf.sparse_tensor_dense_matmul(alphas,P)
return alphas,alphasP
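        # Shape sketch (illustrative, not executed): with 3 nodes and the two directed
        # edges 0->1 and 2->1, the expected feeds would look like
        #     S      = [[0, 0], [2, 1]]   # [node_source_index, edge_idx]
        #     T      = [[0, 1], [1, 1]]   # [edge_idx, node_target_index]
        #     Adjind = [[0, 1], [2, 1]]   # [source_node, target_node] for each edge
        #     Sshape = [3, 2]             # [nb_node, nb_edge]
        # and alphas is then a sparse (3, 3) attention matrix with one entry per edge.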
def _create_original_model(self):
std_dev_in = float(1.0 / float(self.node_dim))
self.use_dropout = self.dropout_rate_attention > 0 or self.dropout_rate_node > 0
self.hidden_layer = []
attns0 = []
# Define the First Layer from the Node Input
for a in range(self.nb_attention):
# H0 = Maybe do a common H0 and have different attention parameters
# Change the attention, maybe ?
# Do multiplicative
# How to add edges here
# Just softmax makes a differences
# I could stack [current_node,representation; edge_features;] and do a dot product on that
Wa = init_glorot([int(self.node_dim), int(self.node_indim)], name='Wa0' + str(a))
va = init_glorot([2, int(self.node_indim)], name='va0' + str(a))
if self.distinguish_node_from_neighbor:
H0 = tf.matmul(self.node_input, Wa)
attns0.append(H0)
_, nH = self.simple_graph_attention_layer(self.node_input, Wa, va, self.Ssparse, self.Tsparse, self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=True)
attns0.append(nH)
self.hidden_layer.append(
self.activation(tf.concat(attns0, axis=-1))) # Now dims should be indim*self.nb_attention
# Define Intermediate Layers
for i in range(1, self.num_layers):
attns = []
for a in range(self.nb_attention):
if self.distinguish_node_from_neighbor:
Wia = init_glorot(
[int(self.node_indim * self.nb_attention + self.node_indim), int(self.node_indim)],
name='Wa' + str(i) + '_' + str(a))
else:
Wia = init_glorot([int(self.node_indim * self.nb_attention), int(self.node_indim)],
name='Wa' + str(i) + '_' + str(a))
via = init_glorot([2, int(self.node_indim)], name='va' + str(i) + '_' + str(a))
_, nH = self.simple_graph_attention_layer(self.hidden_layer[-1], Wia, via, self.Ssparse, self.Tsparse,
self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=True)
attns.append(nH)
self.hidden_layer.append(self.activation(tf.concat(attns, axis=-1)))
# Define Logit Layer
out = []
for i in range(self.nb_attention):
#for i in range(1):
logits_a = init_glorot([int(self.node_indim * self.nb_attention), int(self.n_classes)],
name='Logita' + '_' + str(a))
via = init_glorot([2, int(self.n_classes)], name='LogitA' + '_' + str(a))
_, nL = self.simple_graph_attention_layer(self.hidden_layer[-1], logits_a, via, self.Ssparse, self.Tsparse,
self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=True)
out.append(nL)
self.logits = tf.add_n(out) / self.nb_attention
#self.logits = out[0]
def _create_nodedistint_model(self):
'''
        Create a model where the node's own representation is kept distinct from the attended neighbour representation
:return:
'''
std_dev_in = float(1.0 / float(self.node_dim))
self.use_dropout = self.dropout_rate_attention > 0 or self.dropout_rate_node > 0
self.hidden_layer = []
attns0 = []
# Define the First Layer from the Node Input
Wa = tf.eye(int(self.node_dim), name='I0')
H0 = tf.matmul(self.node_input, Wa)
attns0.append(H0)
I = tf.Variable(tf.eye(self.node_dim), trainable=False)
for a in range(self.nb_attention):
# H0 = Maybe do a common H0 and have different attention parameters
# Change the attention, maybe ?
# Do multiplicative
# How to add edges here
# Just softmax makes a differences
# I could stack [current_node,representation; edge_features;] and do a dot product on that
va = init_glorot([2, int(self.node_dim)], name='va0' + str(a))
_, nH = self.simple_graph_attention_layer(H0, I, va, self.Ssparse, self.Tsparse, self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=False,attn_type=self.attn_type)
attns0.append(nH)
self.hidden_layer.append(
self.activation(tf.concat(attns0, axis=-1))) # Now dims should be indim*self.nb_attention
# Define Intermediate Layers
for i in range(1, self.num_layers):
attns = []
if i == 1:
previous_layer_dim =int(self.node_dim * self.nb_attention + self.node_dim)
Wia = init_glorot([previous_layer_dim, int(self.node_indim)],
name='Wa' + str(i) + '_' + str(a))
else:
previous_layer_dim = int(self.node_indim * self.nb_attention + self.node_indim)
Wia = init_glorot( [previous_layer_dim, int(self.node_indim)], name='Wa' + str(i) + '_' + str(a))
Hi = tf.matmul(self.hidden_layer[-1], Wia)
attns.append(Hi)
Ia = tf.Variable(tf.eye(self.node_indim), trainable=False)
for a in range(self.nb_attention):
via = init_glorot([2, int(self.node_indim)], name='va' + str(i) + '_' + str(a))
_, nH = self.simple_graph_attention_layer(Hi, Ia, via, self.Ssparse, self.Tsparse,
self.Aind,
self.Sshape, self.nb_edge, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout, add_self_loop=False,attn_type=self.attn_type)
attns.append(nH)
self.hidden_layer.append(self.activation(tf.concat(attns, axis=-1)))
# Define Logit Layer
#TODO Add Attention on Logit Layer
#It would not cost too much to add an attn mecha once I get the logits
#If x,y are indicated in the node feature then we can implicitly find the type of edges that we are using ...
if self.num_layers>1:
logits_a = init_glorot([int(self.node_indim * self.nb_attention+self.node_indim), int(self.n_classes)],
name='Logita' + '_' + str(a))
else:
logits_a = init_glorot([int(self.node_dim * self.nb_attention + self.node_dim), int(self.n_classes)],
name='Logita' + '_' + str(a))
Bc = tf.ones([int(self.n_classes)], name='LogitA' + '_' + str(a))
# self.logits = tf.add_n(out) / self.nb_attention
self.logits = tf.matmul(self.hidden_layer[-1],logits_a) +Bc
def _create_densegraph_model(self):
'''
        Create a dense attention model where every node attends to every other node
:return:
'''
std_dev_in = float(1.0 / float(self.node_dim))
self.use_dropout = self.dropout_rate_attention > 0 or self.dropout_rate_node > 0
self.hidden_layer = []
attns0 = []
# Define the First Layer from the Node Input
Wa = tf.eye(int(self.node_dim), name='I0')
H0 = tf.matmul(self.node_input, Wa)
attns0.append(H0)
I = tf.Variable(tf.eye(self.node_dim), trainable=False)
for a in range(self.nb_attention):
# H0 = Maybe do a common H0 and have different attention parameters
# Change the attention, maybe ?
# Do multiplicative
# How to add edges here
# Just softmax makes a differences
# I could stack [current_node,representation; edge_features;] and do a dot product on that
va = init_glorot([2, int(self.node_dim)], name='va0' + str(a))
_, nH = self.dense_graph_attention_layer(H0, I, va, self.nb_node, self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout)
attns0.append(nH)
self.hidden_layer.append(
self.activation(tf.concat(attns0, axis=-1))) # Now dims should be indim*self.nb_attention
# Define Intermediate Layers
for i in range(1, self.num_layers):
attns = []
if i == 1:
previous_layer_dim =int(self.node_dim * self.nb_attention + self.node_dim)
Wia = init_glorot([previous_layer_dim, int(self.node_indim)],
name='Wa' + str(i) + '_' + str(a))
else:
previous_layer_dim = int(self.node_indim * self.nb_attention + self.node_indim)
Wia = init_glorot( [previous_layer_dim, int(self.node_indim)], name='Wa' + str(i) + '_' + str(a))
Hi = tf.matmul(self.hidden_layer[-1], Wia)
attns.append(Hi)
Ia = tf.Variable(tf.eye(self.node_indim), trainable=False)
for a in range(self.nb_attention):
via = init_glorot([2, int(self.node_indim)], name='va' + str(i) + '_' + str(a))
_, nH = self.dense_graph_attention_layer(Hi, Ia, via, self.nb_node,
self.dropout_p_attn,
self.dropout_p_node,
use_dropout=self.use_dropout)
attns.append(nH)
self.hidden_layer.append(self.activation(tf.concat(attns, axis=-1)))
# Define Logit Layer
#TODO Add Attention on Logit Layer
#It would not cost too much to add an attn mecha once I get the logits
#If x,y are indicated in the node feature then we can implicitly find the type of edges that we are using ...
if self.num_layers>1:
logits_a = init_glorot([int(self.node_indim * self.nb_attention+self.node_indim), int(self.n_classes)],
name='Logita' + '_' + str(a))
else:
logits_a = init_glorot([int(self.node_dim * self.nb_attention + self.node_dim), int(self.n_classes)],
name='Logita' + '_' + str(a))
Bc = tf.ones([int(self.n_classes)], name='LogitA' + '_' + str(a))
# self.logits = tf.add_n(out) / self.nb_attention
self.logits = tf.matmul(self.hidden_layer[-1],logits_a) +Bc
def create_model(self):
'''
Create the tensorflow graph for the model
:return:
'''
self.nb_node = tf.placeholder(tf.int32,(), name='nb_node')
self.nb_edge = tf.placeholder(tf.int32, (), name='nb_edge')
self.node_input = tf.placeholder(tf.float32, [None, self.node_dim], name='X_')
self.y_input = tf.placeholder(tf.float32, [None, self.n_classes], name='Y')
#self.dropout_p_H = tf.placeholder(tf.float32,(), name='dropout_prob_H')
self.dropout_p_node = tf.placeholder(tf.float32, (), name='dropout_prob_N')
self.dropout_p_attn = tf.placeholder(tf.float32, (), name='dropout_prob_edges')
self.S = tf.placeholder(tf.float32, name='S')
self.Ssparse = tf.placeholder(tf.int64, name='Ssparse') #indices
self.Sshape = tf.placeholder(tf.int64, name='Sshape') #indices
self.Aind =tf.placeholder(tf.int64, name='Sshape') #Adjacency indices
self.T = tf.placeholder(tf.float32,[None,None], name='T')
self.Tsparse = tf.placeholder(tf.int64, name='Tsparse')
#self.S_indice = tf.placeholder(tf.in, [None, None], name='S')
#self.F = tf.placeholder(tf.float32,[None,None], name='F')
if self.original_model:
self._create_original_model()
elif self.dense_model:
self._create_densegraph_model()
else:
self._create_nodedistint_model()
cross_entropy_source = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_input)
self.loss = tf.reduce_mean(cross_entropy_source)
self.predict_proba = tf.nn.softmax(self.logits)
self.pred = tf.argmax(tf.nn.softmax(self.logits), 1)
self.correct_prediction = tf.equal(self.pred, tf.argmax(self.y_input, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
self.grads_and_vars = self.optalg.compute_gradients(self.loss)
self.train_step = self.optalg.apply_gradients(self.grads_and_vars)
# Add ops to save and restore all the variables.
self.init = tf.global_variables_initializer()
self.saver= tf.train.Saver(max_to_keep=0)
traceln(' -- Number of Params: ', self.get_nb_params())
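        # A minimal usage sketch (assuming `graph` exposes X, Y, F, Sind and Tind, as
        # consumed by the feed dictionaries in train()/test() below):
        #
        #     gat = GraphAttNet(node_dim, nb_classes, num_layers=2, nb_attention=3)
        #     gat.create_model()
        #     with tf.Session() as session:
        #         session.run(gat.init)
        #         gat.train(session, graph, n_iter=10)
        #         acc = gat.test(session, graph)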
#TODO Move in MultigraphNN
def save_model(self, session, model_filename):
traceln("Saving Model")
save_path = self.saver.save(session, model_filename)
def restore_model(self, session, model_filename):
self.saver.restore(session, model_filename)
traceln("Model restored.")
def train(self,session,graph,verbose=False,n_iter=1):
'''
Apply a train operation, ie sgd step for a single graph
:param session:
:param graph: a graph from GCN_Dataset
:param verbose:
:param n_iter: (default 1) number of steps to perform sgd for this graph
:return:
'''
#TrainEvalSet Here
for i in range(n_iter):
#traceln(' -- Train',X.shape,EA.shape)
#traceln(' -- DropoutEdges',self.dropout_rate_edge)
Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
#self.F: graph.F,
self.Aind: Aind,
self.y_input: graph.Y,
#self.dropout_p_H: self.dropout_rate_H,
self.dropout_p_node: self.dropout_rate_node,
self.dropout_p_attn: self.dropout_rate_attention,
}
Ops =session.run([self.train_step,self.loss], feed_dict=feed_batch)
if verbose:
traceln(' -- Training Loss',Ops[1])
def test(self,session,graph,verbose=True):
'''
Test return the loss and accuracy for the graph passed as argument
:param session:
:param graph:
:param verbose:
:return:
'''
Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
self.Aind: Aind,
#self.F: graph.F,
self.y_input: graph.Y,
#self.dropout_p_H: 0.0,
self.dropout_p_node: 0.0,
self.dropout_p_attn: 0.0,
#self.dropout_p_edge_feat: 0.0,
#self.NA_indegree: graph.NA_indegree
}
Ops =session.run([self.loss,self.accuracy], feed_dict=feed_batch)
if verbose:
traceln(' -- Test Loss',Ops[0],' Test Accuracy:',Ops[1])
return Ops[1]
def predict(self,session,graph,verbose=True):
'''
Does the prediction
:param session:
:param graph:
:param verbose:
:return:
'''
Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
# fast_gcn.S: np.asarray(graph.S.todense()).squeeze(),
# fast_gcn.Ssparse: np.vstack([graph.S.row,graph.S.col]),
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
# fast_gcn.T: np.asarray(graph.T.todense()).squeeze(),
#self.F: graph.F,
self.Aind: Aind,
self.dropout_p_node: 0.0,
self.dropout_p_attn: 0.0,
}
Ops = session.run([self.pred], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:',Ops[0].shape)
return Ops[0]
def prediction_prob(self, session, graph, verbose=True):
'''
        Return the predicted class probabilities
:param session:
:param graph:
:param verbose:
:return:
'''
Aind = np.array(np.stack([graph.Sind[:, 0], graph.Tind[:, 1]], axis=-1), dtype='int64')
feed_batch = {
self.nb_node: graph.X.shape[0],
self.nb_edge: graph.F.shape[0],
self.node_input: graph.X,
# fast_gcn.S: np.asarray(graph.S.todense()).squeeze(),
# fast_gcn.Ssparse: np.vstack([graph.S.row,graph.S.col]),
self.Ssparse: np.array(graph.Sind, dtype='int64'),
self.Sshape: np.array([graph.X.shape[0], graph.F.shape[0]], dtype='int64'),
self.Tsparse: np.array(graph.Tind, dtype='int64'),
# fast_gcn.T: np.asarray(graph.T.todense()).squeeze(),
# self.F: graph.F,
self.Aind: Aind,
self.dropout_p_node: 0.0,
self.dropout_p_attn: 0.0,
}
Ops = session.run([self.predict_proba], feed_dict=feed_batch)
if verbose:
traceln(' -- Got Prediction for:', Ops[0].shape)
return Ops[0]
#TODO Move that MultiGraphNN
def train_All_lG(self,session,graph_train,graph_val, max_iter, eval_iter = 10, patience = 7, graph_test = None, save_model_path = None):
'''
Merge all the graph and train on them
:param session:
:param graph_train: the list of graph to train on
:param graph_val: the list of graph used for validation
:param max_iter: maximum number of epochs
:param eval_iter: evaluate every eval_iter
:param patience: stopped training if accuracy is not improved on the validation set after patience_value
:param graph_test: Optional. If a test set is provided, then accuracy on the test set is reported
:param save_model_path: checkpoints filename to save the model.
:return: A Dictionary with training accuracies, validations accuracies and test accuracies if any, and the Wedge parameters
'''
best_val_acc = 0.0
wait = 0
stop_training = False
stopped_iter = max_iter
train_accuracies = []
validation_accuracies = []
test_accuracies = []
conf_mat = []
start_monitoring_val_acc = False
# Not Efficient to compute this for
merged_graph = gcn_datasets.GCNDataset.merge_allgraph(graph_train)
self.train(session, merged_graph, n_iter=1)
for i in range(max_iter):
if stop_training:
break
if i % eval_iter == 0:
traceln('\n -- Epoch', i)
_, tr_acc = self.test_lG(session, graph_train, verbose=False)
traceln(' -- Train Acc', '%.4f' % tr_acc)
train_accuracies.append(tr_acc)
_, node_acc = self.test_lG(session, graph_val, verbose=False)
traceln(' -- Valid Acc', '%.4f' % node_acc)
validation_accuracies.append(node_acc)
if save_model_path:
save_path = self.saver.save(session, save_model_path, global_step=i)
if graph_test:
_, test_acc = self.test_lG(session, graph_test, verbose=False)
traceln(' -- Test Acc', '%.4f' % test_acc)
test_accuracies.append(test_acc)
# TODO min_delta
# if tr_acc>0.99:
# start_monitoring_val_acc=True
if node_acc > best_val_acc:
best_val_acc = node_acc
wait = 0
else:
if wait >= patience:
stopped_iter = i
stop_training = True
wait += 1
else:
self.train(session, merged_graph, n_iter=1)
# Final Save
# if save_model_path:
# save_path = self.saver.save(session, save_model_path, global_step=i)
# TODO Add the final step
mean_acc = []
traceln(' -- Stopped Model Training after', stopped_iter)
traceln(' -- Val Accuracies', validation_accuracies)
traceln(' -- Final Training Accuracy')
_, node_train_acc = self.test_lG(session, graph_train)
traceln(' -- Train Mean Accuracy', '%.4f' % node_train_acc)
traceln(' -- Final Valid Acc')
self.test_lG(session, graph_val)
R = {}
R['train_acc'] = train_accuracies
R['val_acc'] = validation_accuracies
R['test_acc'] = test_accuracies
R['stopped_iter'] = stopped_iter
R['confusion_matrix'] = conf_mat
# R['W_edge'] =self.get_Wedge(session)
if graph_test:
_, final_test_acc = self.test_lG(session, graph_test)
traceln(' -- Final Test Acc', '%.4f' % final_test_acc)
R['final_test_acc'] = final_test_acc
return R
| bsd-3-clause |
Mariaanisimova/pythonintask | PINp/2014/Chernov_M_S/task_4_27.py | 1 | 1532 | # Task 4. Variant 27.
# Write a program that prints the name under which Domenico Theotokopoulos
# is better known. Additionally, the program must print the person's area of
# interest, the place of birth, the years of birth and death (if the person
# has died), and compute the age at the present moment (or at the moment of
# death). Variables must be used to store all required data. After printing
# the information, the program must wait until the user presses Enter to exit.
# Chernov Mikhail Sergeevich
# 28.05.2016
name=input("The hero of our program is Domenico Theotokopoulos.\nUnder what name do we know this person?")
print("Your answer:", name)
print("That's right: Domenico Theotokopoulos is", name)
print("Place of birth: Crete, Greece.")
print("Year of birth: 1541.")
print("Year of death: 1614.")
print("Age at the moment of death:", 1614-1541)
print("Area of interest: painter.")
input("\n\nPress Enter to exit.")
| apache-2.0 |
geggo/pyface | docs/source/sphinxext/refactordoc/function_doc.py | 6 | 3097 | # -*- coding: UTF-8 -*-
#------------------------------------------------------------------------------
# file: function_doc.py
# License: LICENSE.TXT
# Author: Ioannis Tziakos
#
# Copyright (c) 2011, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from base_doc import BaseDoc
from line_functions import get_indent, add_indent
from fields import ArgumentField, ListItemWithTypeField, ListItemField
class FunctionDoc(BaseDoc):
"""Docstring refactoring for functions"""
def __init__(self, lines, headers=None, verbose=False):
if headers is None:
headers = {'Returns': 'returns', 'Arguments': 'arguments',
'Parameters': 'arguments', 'Raises': 'raises',
'Yields': 'returns', 'Notes':'notes'}
super(FunctionDoc, self).__init__(lines, headers, verbose)
return
def _refactor_returns(self, header):
"""Refactor the return section to sphinx friendly format.
"""
index = self.index
self.remove_lines(index, 2)
indent = get_indent(self.peek())
fields = self.extract_fields(indent, field_type=ListItemWithTypeField)
lines = [indent + ':returns:']
prefix = '' if len(fields) == 1 else '- '
for field in fields:
lines += field.to_rst(len(indent) + 4, prefix)
self.insert_lines(lines, index)
self.index += len(lines)
return
def _refactor_raises(self, header):
"""Refactor the raises section to sphinx friendly format"""
index = self.index
self.remove_lines(index, 2)
indent = get_indent(self.peek())
fields = self.extract_fields(indent, field_type=ListItemField)
lines = [indent + ':raises:']
prefix = '' if len(fields) == 1 else '- '
for field in fields:
lines += field.to_rst(len(indent) + 4, prefix)
self.insert_lines(lines, index)
self.index += len(lines)
return
def _refactor_arguments(self, header):
"""Refactor the argument section to sphinx friendly format
"""
index = self.index
self.remove_lines(index, 2)
indent = get_indent(self.peek())
fields = self.extract_fields(indent, field_type=ArgumentField)
lines = []
for field in fields:
lines += field.to_rst(len(indent))
self.insert_lines(lines, index)
self.index += len(lines)
return
def _refactor_notes(self, header):
"""Refactor the argument section to sphinx friendly format.
"""
if self.verbose:
print 'Refactoring Notes'
descriptions = []
index = self.index
self.remove_lines(index, 2)
indent = get_indent(self.peek())
paragraph = self.get_next_paragraph()
descriptions.append(indent + '.. note::')
descriptions += add_indent(paragraph)
self.insert_lines(descriptions, index)
self.index += len(descriptions)
return descriptions
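# For illustration: a numpy-style section in the input docstring such as
#
#     Returns
#     -------
#     result : dict
#         The refactored lines.
#
# is rewritten by _refactor_returns into a Sphinx field list starting with
# ``:returns:``, each item being rendered through ListItemWithTypeField.to_rst.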
| bsd-3-clause |
malmiron/incubator-airflow | tests/plugins/test_plugin.py | 2 | 3720 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This is the class you derive to create a plugin
from airflow.plugins_manager import AirflowPlugin
from flask import Blueprint
from flask_admin import BaseView, expose
from flask_admin.base import MenuLink
from flask_appbuilder import BaseView as AppBuilderBaseView
# Importing base classes that we need to derive
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.executors.base_executor import BaseExecutor
# Will show up under airflow.hooks.test_plugin.PluginHook
class PluginHook(BaseHook):
pass
# Will show up under airflow.operators.test_plugin.PluginOperator
class PluginOperator(BaseOperator):
pass
# Will show up under airflow.sensors.test_plugin.PluginSensorOperator
class PluginSensorOperator(BaseSensorOperator):
pass
# Will show up under airflow.executors.test_plugin.PluginExecutor
class PluginExecutor(BaseExecutor):
pass
# Will show up under airflow.macros.test_plugin.plugin_macro
def plugin_macro():
pass
# Creating a flask admin BaseView
class TestView(BaseView):
@expose('/')
def test(self):
# in this example, put your test_plugin/test.html
# template at airflow/plugins/templates/test_plugin/test.html
return self.render("test_plugin/test.html", content="Hello galaxy!")
v = TestView(category="Test Plugin", name="Test View")
# Creating a flask appbuilder BaseView
class TestAppBuilderBaseView(AppBuilderBaseView):
default_view = "test"
@expose("/")
def test(self):
return self.render("test_plugin/test.html", content="Hello galaxy!")
v_appbuilder_view = TestAppBuilderBaseView()
v_appbuilder_package = {"name": "Test View",
"category": "Test Plugin",
"view": v_appbuilder_view}
# Creating a flask appbuilder Menu Item
appbuilder_mitem = {"name": "Google",
"category": "Search",
"category_icon": "fa-th",
"href": "https://www.google.com"}
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"test_plugin", __name__,
template_folder='templates', # registers airflow/plugins/templates as a Jinja template folder
static_folder='static',
static_url_path='/static/test_plugin')
ml = MenuLink(
category='Test Plugin',
name="Test Menu Link",
url="https://airflow.incubator.apache.org/")
# Defining the plugin class
class AirflowTestPlugin(AirflowPlugin):
name = "test_plugin"
operators = [PluginOperator]
sensors = [PluginSensorOperator]
hooks = [PluginHook]
executors = [PluginExecutor]
macros = [plugin_macro]
admin_views = [v]
flask_blueprints = [bp]
menu_links = [ml]
appbuilder_views = [v_appbuilder_package]
appbuilder_menu_items = [appbuilder_mitem]
| apache-2.0 |
scholarly/pynacl | src/nacl/signing.py | 1 | 5489 | # Copyright 2013 Donald Stufft and individual contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import six
from nacl import encoding
import nacl.c
from nacl.utils import StringFixer, random
class SignedMessage(six.binary_type):
"""
    A bytes subclass that holds a message that has been signed by a
:class:`SigningKey`.
"""
@classmethod
def _from_parts(cls, signature, message, combined):
obj = cls(combined)
obj._signature = signature
obj._message = message
return obj
@property
def signature(self):
"""
The signature contained within the :class:`SignedMessage`.
"""
return self._signature
@property
def message(self):
"""
The message contained within the :class:`SignedMessage`.
"""
return self._message
class VerifyKey(encoding.Encodable, StringFixer, object):
"""
The public key counterpart to an Ed25519 SigningKey for producing digital
signatures.
:param key: [:class:`bytes`] Serialized Ed25519 public key
:param encoder: A class that is able to decode the `key`
"""
def __init__(self, key, encoder=encoding.RawEncoder):
# Decode the key
key = encoder.decode(key)
if len(key) != nacl.c.crypto_sign_PUBLICKEYBYTES:
raise ValueError(
"The key must be exactly %s bytes long" %
nacl.c.crypto_sign_PUBLICKEYBYTES,
)
self._key = key
def __bytes__(self):
return self._key
def verify(self, smessage, signature=None, encoder=encoding.RawEncoder):
"""
Verifies the signature of a signed message, returning the message
if it has not been tampered with else raising
:class:`~nacl.signing.BadSignatureError`.
        :param smessage: [:class:`bytes`] Either the original message or a
            signature and message concatenated together.
        :param signature: [:class:`bytes`] If an unsigned message is given for
            smessage then the detached signature must be provided.
:param encoder: A class that is able to decode the secret message and
signature.
:rtype: :class:`bytes`
"""
if signature is not None:
# If we were given the message and signature separately, combine
# them.
smessage = signature + smessage
# Decode the signed message
smessage = encoder.decode(smessage)
return nacl.c.crypto_sign_open(smessage, self._key)
class SigningKey(encoding.Encodable, StringFixer, object):
"""
Private key for producing digital signatures using the Ed25519 algorithm.
Signing keys are produced from a 32-byte (256-bit) random seed value. This
value can be passed into the :class:`~nacl.signing.SigningKey` as a
:func:`bytes` whose length is 32.
.. warning:: This **must** be protected and remain secret. Anyone who knows
        the value of your :class:`~nacl.signing.SigningKey` or its seed can
masquerade as you.
:param seed: [:class:`bytes`] Random 32-byte value (i.e. private key)
:param encoder: A class that is able to decode the seed
:ivar: verify_key: [:class:`~nacl.signing.VerifyKey`] The verify
(i.e. public) key that corresponds with this signing key.
"""
def __init__(self, seed, encoder=encoding.RawEncoder):
# Decode the seed
seed = encoder.decode(seed)
# Verify that our seed is the proper size
if len(seed) != nacl.c.crypto_sign_SEEDBYTES:
raise ValueError(
"The seed must be exactly %d bytes long" %
nacl.c.crypto_sign_SEEDBYTES
)
public_key, secret_key = nacl.c.crypto_sign_seed_keypair(seed)
self._seed = seed
self._signing_key = secret_key
self.verify_key = VerifyKey(public_key)
def __bytes__(self):
return self._seed
@classmethod
def generate(cls):
"""
        Generates a random :class:`~nacl.signing.SigningKey` object.
:rtype: :class:`~nacl.signing.SigningKey`
"""
return cls(
random(nacl.c.crypto_sign_SEEDBYTES),
encoder=encoding.RawEncoder,
)
def sign(self, message, encoder=encoding.RawEncoder):
"""
Sign a message using this key.
:param message: [:class:`bytes`] The data to be signed.
:param encoder: A class that is used to encode the signed message.
:rtype: :class:`~nacl.signing.SignedMessage`
"""
raw_signed = nacl.c.crypto_sign(message, self._signing_key)
signature = encoder.encode(raw_signed[:nacl.c.crypto_sign_BYTES])
message = encoder.encode(raw_signed[nacl.c.crypto_sign_BYTES:])
signed = encoder.encode(raw_signed)
return SignedMessage._from_parts(signature, message, signed)
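# A minimal usage sketch (not part of the module; assumes PyNaCl is installed
# and importable as nacl.signing):
#
#     from nacl.signing import SigningKey
#
#     signing_key = SigningKey.generate()
#     signed = signing_key.sign(b"attack at dawn")
#     verify_key = signing_key.verify_key
#     verify_key.verify(signed)                             # combined form
#     verify_key.verify(signed.message, signed.signature)   # detached form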
| apache-2.0 |
patilsangram/erpnext | erpnext/accounts/doctype/budget/test_budget.py | 9 | 11765 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate
from erpnext.accounts.utils import get_fiscal_year
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
from erpnext.accounts.doctype.budget.budget import get_actual_expense, BudgetError
from erpnext.accounts.doctype.journal_entry.test_journal_entry import make_journal_entry
class TestBudget(unittest.TestCase):
def test_monthly_budget_crossed_ignore(self):
set_total_expense_zero("2013-02-28", "Cost Center")
budget = make_budget(budget_against="Cost Center")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, "_Test Cost Center - _TC", posting_date="2013-02-28", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv.name}))
budget.cancel()
def test_monthly_budget_crossed_stop1(self):
set_total_expense_zero("2013-02-28", "Cost Center")
budget = make_budget(budget_against="Cost Center")
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, "_Test Cost Center - _TC", posting_date="2013-02-28")
self.assertRaises(BudgetError, jv.submit)
budget.load_from_db()
budget.cancel()
def test_exception_approver_role(self):
set_total_expense_zero("2013-02-28", "Cost Center")
budget = make_budget(budget_against="Cost Center")
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, "_Test Cost Center - _TC", posting_date="2013-03-02")
self.assertRaises(BudgetError, jv.submit)
frappe.db.set_value('Company', budget.company, 'exception_budget_approver_role', 'Accounts User')
jv.submit()
self.assertEqual(frappe.db.get_value('Journal Entry', jv.name, 'docstatus'), 1)
jv.cancel()
frappe.db.set_value('Company', budget.company, 'exception_budget_approver_role', '')
budget.load_from_db()
budget.cancel()
def test_monthly_budget_crossed_for_mr(self):
budget = make_budget(applicable_on_material_request=1,
applicable_on_purchase_order=1, action_if_accumulated_monthly_budget_exceeded_on_mr="Stop",
budget_against="Cost Center")
fiscal_year = get_fiscal_year(nowdate())[0]
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
frappe.db.set_value("Budget", budget.name, "fiscal_year", fiscal_year)
mr = frappe.get_doc({
"doctype": "Material Request",
"material_request_type": "Purchase",
"transaction_date": nowdate(),
"company": budget.company,
"items": [{
'item_code': '_Test Item',
'qty': 1,
'uom': "_Test UOM",
'warehouse': '_Test Warehouse - _TC',
'schedule_date': nowdate(),
'rate': 100000,
'expense_account': '_Test Account Cost for Goods Sold - _TC',
'cost_center': '_Test Cost Center - _TC'
}]
})
mr.set_missing_values()
self.assertRaises(BudgetError, mr.submit)
budget.load_from_db()
budget.cancel()
def test_monthly_budget_crossed_for_po(self):
budget = make_budget(applicable_on_purchase_order=1,
action_if_accumulated_monthly_budget_exceeded_on_po="Stop", budget_against="Cost Center")
fiscal_year = get_fiscal_year(nowdate())[0]
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
frappe.db.set_value("Budget", budget.name, "fiscal_year", fiscal_year)
po = create_purchase_order(transaction_date=nowdate(), do_not_submit=True)
po.set_missing_values()
self.assertRaises(BudgetError, po.submit)
budget.load_from_db()
budget.cancel()
def test_monthly_budget_crossed_stop2(self):
set_total_expense_zero("2013-02-28", "Project")
budget = make_budget(budget_against="Project")
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, "_Test Cost Center - _TC", project="_Test Project", posting_date="2013-02-28")
self.assertRaises(BudgetError, jv.submit)
budget.load_from_db()
budget.cancel()
def test_yearly_budget_crossed_stop1(self):
set_total_expense_zero("2013-02-28", "Cost Center")
budget = make_budget(budget_against="Cost Center")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 150000, "_Test Cost Center - _TC", posting_date="2013-03-28")
self.assertRaises(BudgetError, jv.submit)
budget.cancel()
def test_yearly_budget_crossed_stop2(self):
set_total_expense_zero("2013-02-28", "Project")
budget = make_budget(budget_against="Project")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 150000, "_Test Cost Center - _TC", project="_Test Project", posting_date="2013-03-28")
self.assertRaises(BudgetError, jv.submit)
budget.cancel()
def test_monthly_budget_on_cancellation1(self):
set_total_expense_zero("2013-02-28", "Cost Center")
budget = make_budget(budget_against="Cost Center")
jv1 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 20000, "_Test Cost Center - _TC", posting_date="2013-02-28", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv1.name}))
jv2 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 20000, "_Test Cost Center - _TC", posting_date="2013-02-28", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv2.name}))
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
self.assertRaises(BudgetError, jv1.cancel)
budget.load_from_db()
budget.cancel()
def test_monthly_budget_on_cancellation2(self):
set_total_expense_zero("2013-02-28", "Project")
budget = make_budget(budget_against="Project")
jv1 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 20000, "_Test Cost Center - _TC", posting_date="2013-02-28", submit=True, project="_Test Project")
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv1.name}))
jv2 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 20000, "_Test Cost Center - _TC", posting_date="2013-02-28", submit=True, project="_Test Project")
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv2.name}))
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
self.assertRaises(BudgetError, jv1.cancel)
budget.load_from_db()
budget.cancel()
def test_monthly_budget_against_group_cost_center(self):
set_total_expense_zero("2013-02-28", "Cost Center")
set_total_expense_zero("2013-02-28", "Cost Center", "_Test Cost Center 2 - _TC")
budget = make_budget(budget_against="Cost Center", cost_center="_Test Company - _TC")
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, "_Test Cost Center 2 - _TC", posting_date="2013-02-28")
self.assertRaises(BudgetError, jv.submit)
budget.load_from_db()
budget.cancel()
def test_monthly_budget_against_parent_group_cost_center(self):
cost_center = "_Test Cost Center 3 - _TC"
if not frappe.db.exists("Cost Center", cost_center):
frappe.get_doc({
'doctype': 'Cost Center',
'cost_center_name': '_Test Cost Center 3',
'parent_cost_center': "_Test Company - _TC",
'company': '_Test Company',
'is_group': 0
}).insert(ignore_permissions=True)
budget = make_budget(budget_against="Cost Center", cost_center=cost_center)
frappe.db.set_value("Budget", budget.name, "action_if_accumulated_monthly_budget_exceeded", "Stop")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 40000, cost_center, posting_date="2013-02-28")
self.assertRaises(BudgetError, jv.submit)
budget.load_from_db()
budget.cancel()
jv.cancel()
frappe.delete_doc('Journal Entry', jv.name)
frappe.delete_doc('Cost Center', cost_center)
def set_total_expense_zero(posting_date, budget_against_field=None, budget_against_CC=None):
if budget_against_field == "Project":
budget_against = "_Test Project"
else:
budget_against = budget_against_CC or "_Test Cost Center - _TC"
existing_expense = get_actual_expense(frappe._dict({
"account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center - _TC",
"monthly_end_date": posting_date,
"company": "_Test Company",
"fiscal_year": "_Test Fiscal Year 2013",
"budget_against_field": budget_against_field,
"budget_against": budget_against
}))
if existing_expense:
if budget_against_field == "Cost Center":
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", -existing_expense, "_Test Cost Center - _TC", posting_date="2013-02-28", submit=True)
elif budget_against_field == "Project":
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", -existing_expense, "_Test Cost Center - _TC", submit=True, project="_Test Project", posting_date="2013-02-28")
def make_budget(**args):
args = frappe._dict(args)
budget_against=args.budget_against
cost_center=args.cost_center
if budget_against == "Project":
budget_list = frappe.get_all("Budget", fields=["name"], filters = {"name": ("like", "_Test Project/_Test Fiscal Year 2013%")})
else:
cost_center_name = "{0}%".format(cost_center or "_Test Cost Center - _TC/_Test Fiscal Year 2013")
budget_list = frappe.get_all("Budget", fields=["name"], filters = {"name": ("like", cost_center_name)})
for d in budget_list:
frappe.db.sql("delete from `tabBudget` where name = %(name)s", d)
frappe.db.sql("delete from `tabBudget Account` where parent = %(name)s", d)
budget = frappe.new_doc("Budget")
if budget_against == "Project":
budget.project = "_Test Project"
else:
budget.cost_center =cost_center or "_Test Cost Center - _TC"
budget.fiscal_year = "_Test Fiscal Year 2013"
budget.monthly_distribution = "_Test Distribution"
budget.company = "_Test Company"
budget.applicable_on_booking_actual_expenses = 1
budget.action_if_annual_budget_exceeded = "Stop"
budget.action_if_accumulated_monthly_budget_exceeded = "Ignore"
budget.budget_against = budget_against
budget.append("accounts", {
"account": "_Test Account Cost for Goods Sold - _TC",
"budget_amount": 100000
})
if args.applicable_on_material_request:
budget.applicable_on_material_request = 1
budget.action_if_annual_budget_exceeded_on_mr = args.action_if_annual_budget_exceeded_on_mr or 'Warn'
budget.action_if_accumulated_monthly_budget_exceeded_on_mr = args.action_if_accumulated_monthly_budget_exceeded_on_mr or 'Warn'
if args.applicable_on_purchase_order:
budget.applicable_on_purchase_order = 1
budget.action_if_annual_budget_exceeded_on_po = args.action_if_annual_budget_exceeded_on_po or 'Warn'
budget.action_if_accumulated_monthly_budget_exceeded_on_po = args.action_if_accumulated_monthly_budget_exceeded_on_po or 'Warn'
budget.insert()
budget.submit()
return budget
| gpl-3.0 |
fritsvanveen/QGIS | python/ext-libs/pygments/lexers/__init__.py | 35 | 8743 | # -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, itervalues, guess_decode
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + list(LEXERS)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
"""
for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
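# A short usage sketch (hedged; the alias, filename and option shown are just
# examples):
#
#     from pygments.lexers import get_lexer_by_name, get_lexer_for_filename
#
#     lexer = get_lexer_by_name("python", stripall=True)
#     lexer = get_lexer_for_filename("example.py", code=source_text)
#
# Both raise ClassNotFound when no registered lexer matches.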
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus
return cls.priority + bonus
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
Lookup all lexers that handle those filenames primary (``filenames``)
or secondary (``alias_filenames``). Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
| gpl-2.0 |
nkrinner/nova | nova/api/openstack/compute/plugins/v3/suspend_server.py | 15 | 3308 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
ALIAS = "os-suspend-server"
def authorize(context, action_name):
action = 'v3:%s:%s' % (ALIAS, action_name)
extensions.extension_authorizer('compute', action)(context)
class SuspendServerController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SuspendServerController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@extensions.expected_errors((404, 409))
@wsgi.action('suspend')
def _suspend(self, req, id, body):
"""Permit admins to suspend the server."""
context = req.environ['nova.context']
authorize(context, 'suspend')
try:
server = common.get_instance(self.compute_api, context, id,
want_objects=True)
self.compute_api.suspend(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'suspend')
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('resume')
def _resume(self, req, id, body):
"""Permit admins to resume the server from suspend."""
context = req.environ['nova.context']
authorize(context, 'resume')
try:
server = common.get_instance(self.compute_api, context, id,
want_objects=True)
self.compute_api.resume(context, server)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resume')
return webob.Response(status_int=202)
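    # A hedged request sketch for these actions (the URL prefix and auth
    # headers depend on the deployment):
    #
    #     POST /v3/servers/{server_id}/action    with body {"suspend": null}
    #     POST /v3/servers/{server_id}/action    with body {"resume": null}
    #
    # Both return 202 on success; a locked instance or an invalid state yields
    # 409 via HTTPConflict as handled above.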
class SuspendServer(extensions.V3APIExtensionBase):
"""Enable suspend/resume server actions."""
name = "SuspendServer"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/%s/api/v3" % ALIAS
version = 1
def get_controller_extensions(self):
controller = SuspendServerController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
| apache-2.0 |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py | 23 | 5719 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
import sys
# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
# system, use it to handle IPAddress ServerAltnames (this was added in
# python-3.5) otherwise only do DNS matching. This allows
# backports.ssl_match_hostname to continue to be used all the way back to
# python-2.4.
try:
from pip._vendor import ipaddress
except ImportError:
ipaddress = None
__version__ = '3.5.0.1'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
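# Illustrative outcomes of the RFC 6125 rules implemented above (examples
# only, derived from the wildcard handling in this function):
#
#     _dnsname_match('*.example.com', 'www.example.com')        -> match
#     _dnsname_match('*.example.com', 'example.com')            -> no match
#     _dnsname_match('x*.example.com', 'xn.example.com')        -> match
#     _dnsname_match('xn--*.example.com', 'xn--a.example.com')  -> no match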
def _to_unicode(obj):
if isinstance(obj, str) and sys.version_info < (3,):
obj = unicode(obj, encoding='ascii', errors='strict')
return obj
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
# Divergence from upstream: ipaddress can't handle byte str
ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence from upstream: ipaddress can't handle byte str
host_ip = ipaddress.ip_address(_to_unicode(hostname))
except ValueError:
# Not an IP address (common case)
host_ip = None
except UnicodeError:
# Divergence from upstream: Have to deal with ipaddress not taking
# byte strings. addresses should be all ascii, so we consider it not
# an ipaddress in this case
host_ip = None
except AttributeError:
# Divergence from upstream: Make ipaddress library optional
if ipaddress is None:
host_ip = None
else:
raise
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| apache-2.0 |
pkdevbox/stratos | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/databridge/thrift/thrift/server/THttpServer.py | 19 | 3135 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import BaseHTTPServer
from ..server import TServer
from ..transport import TTransport
class ResponseException(Exception):
"""Allows handlers to override the HTTP response
Normally, THttpServer always sends a 200 response. If a handler wants
to override this behavior (e.g., to simulate a misconfigured or
overloaded web server during testing), it can raise a ResponseException.
The function passed to the constructor will be called with the
RequestHandler as its only argument.
"""
def __init__(self, handler):
self.handler = handler
class THttpServer(TServer.TServer):
"""A simple HTTP-based Thrift server
This class is not very performant, but it is useful (for example) for
acting as a mock version of an Apache-based PHP Thrift endpoint.
"""
def __init__(self,
processor,
server_address,
inputProtocolFactory,
outputProtocolFactory=None,
server_class=BaseHTTPServer.HTTPServer):
"""Set up protocol factories and HTTP server.
See BaseHTTPServer for server_address.
See TServer for protocol factories.
"""
if outputProtocolFactory is None:
outputProtocolFactory = inputProtocolFactory
TServer.TServer.__init__(self, processor, None, None, None,
inputProtocolFactory, outputProtocolFactory)
thttpserver = self
        class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(self):
# Don't care about the request path.
itrans = TTransport.TFileObjectTransport(self.rfile)
otrans = TTransport.TFileObjectTransport(self.wfile)
itrans = TTransport.TBufferedTransport(
itrans, int(self.headers['Content-Length']))
otrans = TTransport.TMemoryBuffer()
iprot = thttpserver.inputProtocolFactory.getProtocol(itrans)
oprot = thttpserver.outputProtocolFactory.getProtocol(otrans)
try:
thttpserver.processor.process(iprot, oprot)
except ResponseException, exn:
exn.handler(self)
else:
self.send_response(200)
self.send_header("content-type", "application/x-thrift")
self.end_headers()
self.wfile.write(otrans.getvalue())
        self.httpd = server_class(server_address, RequestHandler)
def serve(self):
self.httpd.serve_forever()
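# A minimal wiring sketch (hedged; MyService and MyHandler stand in for
# Thrift-generated code and a user handler, and the protocol import path
# depends on how this bundled copy of Thrift is packaged):
#
#     from thrift.protocol import TBinaryProtocol
#
#     processor = MyService.Processor(MyHandler())
#     pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#     server = THttpServer(processor, ('', 9090), pfactory)
#     server.serve()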
| apache-2.0 |
cmlasu/smm_gem5 | ext/ply/example/ansic/clex.py | 164 | 3931 | # ----------------------------------------------------------------------
# clex.py
#
# A lexer for ANSI C.
# ----------------------------------------------------------------------
import sys
sys.path.insert(0,"../..")
import ply.lex as lex
# Reserved words
reserved = (
'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE',
'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER',
'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF',
'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE',
)
tokens = reserved + (
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Conditional operator (?)
'CONDOP',
# Delimeters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
)
# Completely ignored characters
t_ignore = ' \t\x0c'
# Newlines
def t_NEWLINE(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='
# Increment/decrement
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
# ->
t_ARROW = r'->'
# ?
t_CONDOP = r'\?'
# Delimeters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers and reserved words
reserved_map = { }
for r in reserved:
reserved_map[r.lower()] = r
def t_ID(t):
r'[A-Za-z_][\w_]*'
t.type = reserved_map.get(t.value,"ID")
return t
# Integer literal
t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comments
def t_comment(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
# Preprocessor directive (ignored)
def t_preprocessor(t):
r'\#(.)*?\n'
t.lexer.lineno += 1
def t_error(t):
print("Illegal character %s" % repr(t.value[0]))
t.lexer.skip(1)
lexer = lex.lex(optimize=1)
if __name__ == "__main__":
lex.runmain(lexer)
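# Driving the lexer programmatically (a hedged sketch; the C snippet is
# arbitrary):
#
#     lexer.input("int x = 42; /* note */")
#     for tok in iter(lexer.token, None):
#         print(tok.type, tok.value)
#
# which should yield INT, ID, EQUALS, ICONST and SEMI tokens; the comment is
# discarded by t_comment above.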
| bsd-3-clause |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pygments/styles/xcode.py | 31 | 1501 | # -*- coding: utf-8 -*-
"""
pygments.styles.xcode
~~~~~~~~~~~~~~~~~~~~~
Style similar to the `Xcode` default theme.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Literal
class XcodeStyle(Style):
"""
Style similar to the Xcode default colouring theme.
"""
default_style = ''
styles = {
Comment: '#177500',
Comment.Preproc: '#633820',
String: '#C41A16',
String.Char: '#2300CE',
Operator: '#000000',
Keyword: '#A90D91',
Name: '#000000',
Name.Attribute: '#836C28',
Name.Class: '#3F6E75',
Name.Function: '#000000',
Name.Builtin: '#A90D91',
# In Obj-C code this token is used to colour Cocoa types
Name.Builtin.Pseudo: '#5B269A',
Name.Variable: '#000000',
Name.Tag: '#000000',
Name.Decorator: '#000000',
        # Workaround for a BUG here: lexer treats multiline method signatures as labels
Name.Label: '#000000',
Literal: '#1C01CE',
Number: '#1C01CE',
Error: '#000000',
}
| apache-2.0 |
boudewijnrempt/breakpad | src/third_party/protobuf/protobuf/python/google/protobuf/service.py | 590 | 9131 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = '[email protected] (Petar Petrov)'
class RpcException(Exception):
"""Exception raised on failed blocking RPC method call."""
pass
class Service(object):
"""Abstract base interface for protocol-buffer-based RPC services.
Services themselves are abstract classes (implemented either by servers or as
stubs), but they subclass this base interface. The methods of this
interface can be used to call the methods of the service without knowing
its exact type at compile time (analogous to the Message interface).
"""
def GetDescriptor():
"""Retrieves this service's descriptor."""
raise NotImplementedError
def CallMethod(self, method_descriptor, rpc_controller,
request, done):
"""Calls a method of the service specified by method_descriptor.
If "done" is None then the call is blocking and the response
message will be returned directly. Otherwise the call is asynchronous
and "done" will later be called with the response value.
In the blocking case, RpcException will be raised on error.
Preconditions:
* method_descriptor.service == GetDescriptor
    * request is of the exact same class as returned by
GetRequestClass(method).
* After the call has started, the request must not be modified.
* "rpc_controller" is of the correct type for the RPC implementation being
used by this Service. For stubs, the "correct type" depends on the
RpcChannel which the stub is using.
Postconditions:
* "done" will be called when the method is complete. This may be
before CallMethod() returns or it may be at some point in the future.
* If the RPC failed, the response value passed to "done" will be None.
Further details about the failure can be found by querying the
RpcController.
"""
raise NotImplementedError
def GetRequestClass(self, method_descriptor):
"""Returns the class of the request message for the specified method.
CallMethod() requires that the request is of a particular subclass of
Message. GetRequestClass() gets the default instance of this required
type.
Example:
method = service.GetDescriptor().FindMethodByName("Foo")
request = stub.GetRequestClass(method)()
request.ParseFromString(input)
service.CallMethod(method, request, callback)
"""
raise NotImplementedError
def GetResponseClass(self, method_descriptor):
"""Returns the class of the response message for the specified method.
This method isn't really needed, as the RpcChannel's CallMethod constructs
the response protocol message. It's provided anyway in case it is useful
for the caller to know the response type in advance.
"""
raise NotImplementedError
class RpcController(object):
"""An RpcController mediates a single method call.
The primary purpose of the controller is to provide a way to manipulate
settings specific to the RPC implementation and to find out about RPC-level
errors. The methods provided by the RpcController interface are intended
to be a "least common denominator" set of features which we expect all
implementations to support. Specific implementations may provide more
advanced features (e.g. deadline propagation).
"""
# Client-side methods below
def Reset(self):
"""Resets the RpcController to its initial state.
After the RpcController has been reset, it may be reused in
a new call. Must not be called while an RPC is in progress.
"""
raise NotImplementedError
def Failed(self):
"""Returns true if the call failed.
After a call has finished, returns true if the call failed. The possible
reasons for failure depend on the RPC implementation. Failed() must not
be called before a call has finished. If Failed() returns true, the
contents of the response message are undefined.
"""
raise NotImplementedError
def ErrorText(self):
"""If Failed is true, returns a human-readable description of the error."""
raise NotImplementedError
def StartCancel(self):
"""Initiate cancellation.
Advises the RPC system that the caller desires that the RPC call be
canceled. The RPC system may cancel it immediately, may wait awhile and
then cancel it, or may not even cancel the call at all. If the call is
canceled, the "done" callback will still be called and the RpcController
will indicate that the call failed at that time.
"""
raise NotImplementedError
# Server-side methods below
def SetFailed(self, reason):
"""Sets a failure reason.
Causes Failed() to return true on the client side. "reason" will be
incorporated into the message returned by ErrorText(). If you find
you need to return machine-readable information about failures, you
should incorporate it into your response protocol buffer and should
NOT call SetFailed().
"""
raise NotImplementedError
def IsCanceled(self):
"""Checks if the client cancelled the RPC.
If true, indicates that the client canceled the RPC, so the server may
as well give up on replying to it. The server should still call the
final "done" callback.
"""
raise NotImplementedError
def NotifyOnCancel(self, callback):
"""Sets a callback to invoke on cancel.
Asks that the given callback be called when the RPC is canceled. The
callback will always be called exactly once. If the RPC completes without
being canceled, the callback will be called after completion. If the RPC
has already been canceled when NotifyOnCancel() is called, the callback
will be called immediately.
NotifyOnCancel() must be called no more than once per request.
"""
raise NotImplementedError
class RpcChannel(object):
"""Abstract interface for an RPC channel.
An RpcChannel represents a communication line to a service which can be used
to call that service's methods. The service may be running on another
machine. Normally, you should not use an RpcChannel directly, but instead
construct a stub {@link Service} wrapping it. Example:
Example:
RpcChannel channel = rpcImpl.Channel("remotehost.example.com:1234")
RpcController controller = rpcImpl.Controller()
MyService service = MyService_Stub(channel)
service.MyMethod(controller, request, callback)
"""
def CallMethod(self, method_descriptor, rpc_controller,
request, response_class, done):
"""Calls the method identified by the descriptor.
Call the given method of the remote service. The signature of this
procedure looks the same as Service.CallMethod(), but the requirements
are less strict in one important way: the request object doesn't have to
be of any specific class as long as its descriptor is method.input_type.
"""
raise NotImplementedError
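# A skeletal subclass sketch (hedged; the transport is elided and every name
# below is a placeholder, not part of this module):
#
#     class MyRpcChannel(RpcChannel):
#         def CallMethod(self, method_descriptor, rpc_controller,
#                        request, response_class, done):
#             payload = request.SerializeToString()
#             reply_bytes = send_to_remote_service(payload)   # placeholder
#             response = response_class()
#             response.ParseFromString(reply_bytes)
#             done(response)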
| bsd-3-clause |
kozyarchuk/NCT-workers | tests/reactive_test.py | 1 | 9204 | import unittest
from nct.utils.reactive.field import Field
from nct.utils.reactive.field_factory import FieldFactory
from decimal import Decimal
import datetime
from nct.utils.reactive.bound_field import BoundField, InvalidModelError,\
InvalidFieldDefinitionlError, DataTypeConversionError
from datetime import date
import sys
if sys.version_info[:2] == (2, 7):
unittest.TestCase.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
class FieldTest(unittest.TestCase):
def test_field_construction(self):
f = Field(name = "f1",
datatype = str,
validation_method = 'vm',
calculation_method = 'cm',
domain_mapping = 'dm')
self.assertEquals("f1", f.name)
self.assertEquals(str, f.datatype)
self.assertEquals("vm", f.validation_method)
self.assertEquals("cm", f.calculation_method)
self.assertEquals("dm", f.domain_mapping)
def test_field_factory_loads_quantity(self):
f = FieldFactory.get_field('quantity')
self.assertEquals('quantity', f.name)
self.assertEquals(Decimal, f.datatype)
self.assertEquals(None, f.calculation_method)
self.assertEquals('must_be_provided', f.validation_method)
self.assertEquals('Trade.quantity', f.domain_mapping)
def test_field_factory_loads_action(self):
f = FieldFactory.get_field('action')
self.assertEquals('action', f.name)
self.assertEquals(str, f.datatype)
self.assertEquals(None, f.calculation_method)
self.assertEquals('must_be_provided', f.validation_method)
self.assertEquals('map_trade_action', f.domain_mapping)
def test_field_factory_loads_trade_date(self):
f = FieldFactory.get_field('trade_date')
self.assertEquals('trade_date', f.name)
self.assertEquals(datetime.date, f.datatype)
self.assertEquals(None, f.calculation_method)
self.assertEquals('must_be_provided', f.validation_method)
self.assertEquals('Trade.trade_date', f.domain_mapping)
def test_field_factory_missing_field(self):
self.assertRaisesRegex(InvalidFieldDefinitionlError, 'invalid_field is not a valid field', FieldFactory.get_field, 'invalid_field')
class StubModel(object):
FIELD_DEPENDS = {
'quantity':[]
}
def val_meth(self, field):
return "Validation Error"
def calc_meth(self):
return "123"
def mapper(self, field, direction):
if direction == field.TO:
self.domain_value = field.value
else:
return self.domain_value
class BoundFieldTest(unittest.TestCase):
def create_field(self):
f = Field(name="f1",
datatype=Decimal,
validation_method='val_meth',
calculation_method='calc_meth',
domain_mapping='mapper')
return f
def test_can_create(self):
f = self.create_field()
m = StubModel()
bf = BoundField(f , m)
self.assertEquals(m.calc_meth, bf.calculation_method)
self.assertEquals(m.val_meth, bf.validation_method)
self.assertEquals(m.mapper, bf.domain_mapping_method)
self.assertEquals(f, bf.definition )
self.assertEquals('f1', bf.name)
self.assertEquals(None, bf.value )
self.assertEquals(False, bf.has_value)
self.assertEquals(False, bf.has_user_entered_value)
def test_bimd_method_checks_argument_count(self):
f = self.create_field()
m = StubModel()
bf = BoundField(f , m)
self.assertRaisesRegex(InvalidModelError, "Wrong number of arguments to calc_meth method. Expected 3 got 1", bf._bind_method, 'calculation_method', m, 3)
def test_bind_method_raises_error_if_method_is_missing_from_model(self):
f = Field(name="f1", datatype=str,
validation_method='method_is_missing')
m = StubModel()
self.assertRaisesRegex(InvalidModelError, "method_is_missing is not defined in StubModel", BoundField, f , m)
def test_set_value_converts_datatype_and_set_value_flag(self):
f = self.create_field()
bf = BoundField(f , StubModel())
bf.set_value("100.1")
self.assertEquals(Decimal("100.1"), bf.value)
self.assertEquals(True, bf.has_value)
self.assertEquals(True, bf.has_user_entered_value)
def test_set_value_produces_readable_errors_when_conversion_failed_decimal(self):
bf = BoundField(Field(name="f1", datatype=Decimal ) , StubModel())
self.assertRaisesRegex(DataTypeConversionError, "Invalid value >abv< needs to be Numeric", bf.set_value, "abv")
def test_set_value_produces_readable_errors_when_conversion_failed_date(self):
bf = BoundField(Field(name="f1", datatype=datetime.date) , StubModel())
self.assertRaisesRegex(DataTypeConversionError, "Invalid value >2012-13-12< needs to be YYYY-MM-DD format", bf.set_value, "2012-13-12")
def test_set_value_does_not_set_user_entered_flag(self):
f = self.create_field()
bf = BoundField(f , StubModel())
bf.set_value(Decimal("100.1"),user_entered=False)
self.assertEquals(Decimal("100.1"), bf.value)
self.assertEquals(True, bf.has_value)
self.assertEquals(False, bf.has_user_entered_value)
def test_set_value_unsets_value_on_none(self):
f = self.create_field()
bf = BoundField(f , StubModel())
bf.set_value(Decimal("100.1"))
bf.set_value(None)
self.assertEquals( None, bf.value)
self.assertEquals( False, bf.has_value)
self.assertEquals( False, bf.has_user_entered_value)
def test_recalc(self):
f = self.create_field()
bf = BoundField(f , StubModel())
self.assertEquals(True, bf.recalc() )
self.assertEquals(Decimal("123"), bf.value)
bf.set_value("101")
self.assertEquals(False, bf.recalc() )
self.assertEquals(Decimal("101"), bf.value)
def test_recalc_when_calc_method_is_none(self):
f = Field(name="f1", datatype=Decimal )
bf = BoundField(f , StubModel())
self.assertEquals(False, bf.recalc() )
self.assertEquals(None, bf.value)
def test_validate(self):
f = self.create_field()
bf = BoundField(f , StubModel())
self.assertEquals( 'Validation Error', bf.validate() )
def test_validate_when_val_meth_is_not_set(self):
f = Field(name="f1", datatype=Decimal )
bf = BoundField(f , StubModel())
self.assertEquals( None, bf.validate() )
def test_bind_domain_mapping_method_when_no_mapping(self):
f = Field(name="f1", datatype=Decimal )
bf = BoundField(f , StubModel())
self.assertEquals( None, bf.domain_mapping_method )
def test_bind_domain_mapping_method_when_invalid_mapping(self):
f = Field(name="f1", datatype=Decimal, domain_mapping="a.b.c" )
self.assertRaisesRegex(InvalidFieldDefinitionlError, 'Invalid domain_mapping a.b.c for field f1', BoundField, f , StubModel())
def test_bind_domain_mapping_direct_model_must_have_domain_object(self):
f = Field(name="f1", datatype=Decimal, domain_mapping="do.f1" )
self.assertRaisesRegex(InvalidModelError, 'StubModel does not support get_domain_object method', BoundField, f , StubModel())
def test_map_to_and_from_domain_using_direct_syntax(self):
class DomainObject:
f1 = 'aaa'
do = DomainObject()
f = Field(name="f1", datatype=Decimal, domain_mapping="do.f1" )
m = StubModel()
m.get_domain_object = lambda name: do
bf = BoundField( f , m)
bf.set_value("123")
bf.map_to_domain()
self.assertEqual(Decimal("123"), do.f1)
do.f1 = Decimal("456")
bf.map_from_domain()
self.assertEqual(Decimal("456"), bf.value)
def test_map_to_and_from_domain_using_model_proxy(self):
f = self.create_field()
m = StubModel()
bf = BoundField( f , m)
bf.set_value("123")
bf.map_to_domain()
self.assertEqual(Decimal("123"), m.domain_value)
m.domain_value = Decimal('456')
bf.map_from_domain()
self.assertEqual(Decimal("456"), bf.value)
def test_map_to_and_from_domain_when_no_mapper(self):
f = Field(name="f1", datatype=Decimal )
bf = BoundField(f , StubModel())
bf.set_value("123")
bf.map_to_domain()
bf.set_value("456")
bf.map_from_domain()
self.assertEqual(Decimal("456"), bf.value)
def test_set_date_fields_as_date(self):
f = Field(name="f1", datatype=datetime.date )
bf = BoundField(f , StubModel())
bf.set_value(date(2015,1,1))
self.assertEquals(date(2015,1,1), bf.value)
def test_set_date_fields_as_string(self):
f = Field(name="f1", datatype=datetime.date )
bf = BoundField(f , StubModel())
bf.set_value("2015-01-02")
self.assertEquals(date(2015,1,2), bf.value)
| gpl-2.0 |
jerpat/csmake | csmake-providers/CsmakeModules/GenerateGitChangeLog.py | 1 | 12101 | # <copyright>
# (c) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# </copyright>
from Csmake.CsmakeModule import CsmakeModule
from CsmakeProviders.GitProvider import GitProvider
import os.path
import codecs
import git
import yaml
class GenerateGitChangeLog(CsmakeModule):
"""Purpose: Generate a changelog of the changes between two git hashes
Library: csmake-providers
Mapping: Use **yields-files to specify the output and the format
For example:
**yields-files =
<migration (log:changelog)> migration/changelog.txt
Will generate a text log of the records
Mapping types supported:
tsv - Tab separated values format
URL\tSHA1\tsummary\tauthor\tiso date of change
csv - Comma separated values format
URL, SHA1, summary, author, iso date of change
yaml - YAML formatted output
log - Simple text log format (like git log --oneline)
SHA1[:7] summary
NOTE: To change the formatting, specialize GenerateGitChangeLog
and define _formatOutput and/or (re)define the appropriate
_<type>Formatter (see implementation comments)
Options:
old - The old ref/SHA-1 to compare
(branch:<name>, tag:<name>, <reference/sha>)
new - The new ref/SHA-1 to compare
(branch:<name>, tag:<name>, <reference/sha>)
repo - The git URL for the repo
local - (OPTIONAL) Local location for repository
Default: %(RESULTS)s/<name of repo>
Phases:
build - Generate a change log based on the given ref/SHA1's
"""
REQUIRED_OPTIONS = ['old', 'new', 'repo']
#--------------------------------------------------------------
# Formatter classes
# By defualt the formatting will attempt to instantiate a
# locally defined _<type>Formatter
#--------------------------------------------------------------
class _logFormatter:
def nolog(self, fileobj, extrainfo={}):
fileobj.write("*** No Changes Detected ***\n")
if 'errortext' in extrainfo:
fileobj.write("----- %s" % extrainfo['errortext'])
def output(self, fileobj, changelog, extrainfo={}):
if changelog['type'] == 'parent':
for log in changelog['history']:
self.customize(log, extrainfo)
fileobj.write("%s\n" % self.formatRecord(log))
else:
self.diverged(fileobj, changelog, extrainfo)
def customize(self, logentry, extrainfo):
#Use this to create custom fields from the information present
#Update the logentry dictionary directly.
logentry.update(extrainfo)
def diverged(self, fileobj, changelog, extrainfo):
fileobj.write(
"NOTE: Record history diverges\n")
old = changelog['old']
if old is not None:
fileobj.write("----- Old Record:\n")
self.customize(old, extrainfo)
fileobj.write(" %s\n" % self.formatRecord(old))
new = changelog['new']
if new is not None:
fileobj.write("----- New Record:\n")
self.customize(new, extrainfo)
fileobj.write(" %s\n" % self.formatRecord(new))
common = changelog['common']
if common is not None:
fileobj.write("----- Common Record:\n")
self.customize(common, extrainfo)
fileobj.write(" %s\n" % self.formatRecord(common))
def formatRecord(self, logentry):
return "%(SHA1)0.7s %(summary)s" % logentry
class _tsvFormatter(_logFormatter):
def formatRecord(self, logentry):
return "%(URL)s\t%(SHA1)s\t%(summary)s\t%(author)s\t%(isodate)s" % logentry
def customize(self, logentry, extrainfo):
GenerateGitChangeLog._logFormatter.customize(self, logentry, extrainfo)
logentry['isodate'] = logentry['datetime'].isoformat()
class _csvFormatter(_logFormatter):
def formatRecord(self, logentry):
return "%(URL)s,%(SHA1)s,\"%(summary)s\",\"%(author)s\",%(isodate)s" % logentry
def customize(self, logentry, extrainfo):
GenerateGitChangeLog._logFormatter.customize(self, logentry, extrainfo)
logentry['isodate'] = logentry['datetime'].isoformat()
class _yamlFormatter(_logFormatter):
def nolog(self, fileobj, extrainfo):
result = {'message': "*** No Change Detected ***"}
result.update(extrainfo)
yamltext = yaml.safe_dump(result)
fileobj.write(yamltext)
def output(self, fileobj, changelog, extrainfo={}):
changelog.update(extrainfo)
yamltext = yaml.safe_dump(changelog)
fileobj.write(yamltext)
#---------------------------------------------------------------
# _formatOutput
# Top function for formatting output
#---------------------------------------------------------------
def _formatOutput(self, changelog, files, extrainfo={}):
for spec in files:
classString = '_%sFormatter' % spec[1]
if not hasattr(self, classString):
self.log.error("File '%s' type '%s' does not have a output formatter defined", spec[0], spec[1])
else:
UTF8Writer = codecs.getwriter('utf8')
with open(spec[0], 'w') as fileobj:
fileobj = UTF8Writer(fileobj)
formatter = getattr(self, classString)()
if changelog is None:
formatter.nolog(fileobj, extrainfo)
else:
formatter.output(fileobj, changelog, extrainfo)
#---------------------------------------------------------------
# csmake Phase implementations
#---------------------------------------------------------------
def build(self, options):
if self.yieldsfiles is None:
self.log.warning("**yields-files was not specified in section. Nothing to do...")
self.log.warning(" No changelog will be produced")
self.log.passed()
return None
filespecs = []
for index in self.yieldsfiles:
location = index['location']
if not location.startswith('/') and \
not location.startswith('./'):
location = os.path.join(
self.env.env['RESULTS'],
index['location'] )
filespecs.append((location, index['type']))
repoURL = options['repo']
findrefs = False
nochange = False
nochangeString = "Unknown reason"
if len(options['old']) == 0 or options['old'] == '<unknown>':
self.log.info("There is no valid previous version")
nochangeString = "No valid previous version"
nochange = True
if options['old'] == options['new']:
self.log.info("The old and new versions are the same, no change")
nochangeString = "Old and new versions identical"
nochange = True
if nochange:
self._formatOutput(None, filespecs, {'errortext': nochangeString})
self.log.passed()
return True
if 'local' in options:
localrepo = options['local']
_, name = os.path.split(localrepo)
else:
_, localrepo = os.path.split(repoURL)
name = localrepo
localrepo = os.path.join(
self.env.env['RESULTS'],
localrepo )
newref, newreftype = GitProvider.splitRepoReference(options['new'])
oldref, oldreftype = GitProvider.splitRepoReference(options['old'])
try:
repo = git.Repo(localrepo)
except git.exc.NoSuchPathError:
GitProvider.fetchRepository(
self.log,
name,
localrepo,
repoURL,
newref,
newreftype,
secure=False )
repo = git.Repo(localrepo)
remote, _ = GitProvider.ensureCsmakeRemote(repo, repoURL)
try:
new = GitProvider.getSHAFromRef(
self.log,
repo,
remote,
newref,
newreftype )
old = GitProvider.getSHAFromRef(
self.log,
repo,
remote,
oldref,
oldreftype )
except Exception as e:
self.log.info("getSHA threw '%s' attempting to lookup ref, retrying", str(e))
GitProvider.fetchRepository(
self.log,
name,
localrepo,
repo,
newref,
newreftype,
secure=False )
new = GitProvider.getSHAFromRef(
self.log,
repo,
remote,
newref,
newreftype )
old = GitProvider.getSHAFromRef(
self.log,
repo,
remote,
oldref,
oldreftype )
try:
result = GitProvider.generateSpanLogInfo(
self.log,
localrepo,
old,
new,
findrefs )
except Exception as e:
self.log.info("Span log threw '%s' attempting to fetch and try again", str(e))
try:
GitProvider.fetchRepository(
self.log,
name,
localrepo,
repo,
newref,
newreftype,
secure=False )
result = GitProvider.generateSpanLogInfo(
self.log,
localrepo,
old,
new,
findrefs )
except:
self.log.exception("Attempt to generate git changelog failed")
self.log.failed()
return None
self.log.devdebug("result == %s", str(result))
self._formatOutput(result,filespecs,{'URL' : repoURL})
self.log.passed()
| gpl-3.0 |
solderinskater/Push-2010 | maemo/gui/proto/tricksimulator.py | 1 | 1362 | _all_=["TrickSimulator"]
# -*- coding: UTF-8 -*-
import sys
import sip
import time
import dbus
from traceback import print_exc
import random
from dbus.mainloop.glib import DBusGMainLoop
import gobject
#iface.RaiseException(reply_handler=handle_raise_reply,
# error_handler=handle_raise_error)
def replay():
isStopped = False
while not (isStopped):
#print "and go"
# Random warten
sleeper = random.randint(1,2)
trickno = random.randint(0,4)
time.sleep(sleeper)
iface.TrickCommit(tricks[trickno])
print "hello dbus: " + tricks[trickno] + " after " + str(sleeper)
# iface.TrickCommit("Hello from Trick " + str(line))
isStopped = True
def stop(self):
isStopped = True
print "WHEEEEEEEEEEEEEEEEEEEEEEEEEEEE\n\n\n"
tricks = ['ollie','360','kickflip','heelflip','shove']
#dbus.mainloop.qt.DBusQtMainLoop(set_as_default=True)
#DBusGMainLoop(set_as_default=True)
isStopped = True
failed = False
hello_replied = False
raise_replied = False
bus = dbus.SessionBus()
try:
remote_object = bus.get_object("net.prometoys.solderinskater.TrickService",
"/net/prometoys/solderinskater/TrickService/DbusTrickObject")
except dbus.DBusException:
print_exc()
sys.exit(1)
iface = dbus.Interface(remote_object, "net.prometoys.solderinskater.TrickService")
print "Tricksimulator ready to go"
replay()
| gpl-3.0 |
grap/OpenUpgrade | addons/hr_timesheet_invoice/wizard/hr_timesheet_invoice_create.py | 337 | 3655 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_timesheet_invoice_create(osv.osv_memory):
_name = 'hr.timesheet.invoice.create'
_description = 'Create invoice from timesheet'
_columns = {
'date': fields.boolean('Date', help='The real date of each work will be displayed on the invoice'),
'time': fields.boolean('Time spent', help='The time of each work done will be displayed on the invoice'),
'name': fields.boolean('Description', help='The detail of each work done will be displayed on the invoice'),
'price': fields.boolean('Cost', help='The cost of each work done will be displayed on the invoice. You probably don\'t want to check this'),
'product': fields.many2one('product.product', 'Force Product', help='Fill this field only if you want to force to use a specific product. Keep empty to use the real product that comes from the cost.'),
}
_defaults = {
'date': 1,
'name': 1,
}
def view_init(self, cr, uid, fields, context=None):
"""
This function checks for precondition before wizard executes
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param fields: List of fields for default value
@param context: A standard dictionary for contextual values
"""
analytic_obj = self.pool.get('account.analytic.line')
data = context and context.get('active_ids', [])
for analytic in analytic_obj.browse(cr, uid, data, context=context):
if analytic.invoice_id:
raise osv.except_osv(_('Warning!'), _("Invoice is already linked to some of the analytic line(s)!"))
def do_create(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, context=context)[0]
# Create an invoice based on selected timesheet lines
invs = self.pool.get('account.analytic.line').invoice_cost_create(cr, uid, context['active_ids'], data, context=context)
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
mod_ids = mod_obj.search(cr, uid, [('name', '=', 'action_invoice_tree1')], context=context)
res_id = mod_obj.read(cr, uid, mod_ids, ['res_id'], context=context)[0]['res_id']
act_win = act_obj.read(cr, uid, [res_id], context=context)[0]
act_win['domain'] = [('id','in',invs),('type','=','out_invoice')]
act_win['name'] = _('Invoices')
return act_win
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
squirrel2038/thearchdruidreport-archive | check-wayback-machine.py | 1 | 1674 | #!/usr/bin/env python3
from datetime import datetime, timezone, timedelta
import json
import re
import sys
import traceback
import feeds
import util
import web_cache
BLOG_POSTS = json.loads(util.get_file_text("blog.json"))
for post in BLOG_POSTS:
page_count = (len(post["comments"]) + 199) // 200
print("DEBUG:", post["url"], len(post["comments"]), page_count)
for page in range(1, page_count + 1):
url = post["url"] if page == 1 else ("%s?commentPage=%d" % (post["url"], page))
print("DEBUG:", url)
obj = json.loads(web_cache.get("https://archive.org/wayback/available?url=" + url).decode("utf8"))
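# The availability API returns JSON shaped roughly like this (illustrative
# values; the keys are the ones accessed below):
# {"archived_snapshots": {"closest": {"available": true, "status": "200",
#   "timestamp": "20170102030405", "url": "http://web.archive.org/web/..."}}}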
try:
snap = obj["archived_snapshots"]["closest"]
assert snap["available"] == True
assert snap["status"] == "200"
ts = re.match(r"^(\d\d\d\d)(\d\d)(\d\d)(\d\d)(\d\d)(\d\d)$", snap["timestamp"])
assert ts
m = re.match(r"^http://web\.archive\.org/web/(\d+)/https?:(//.*)$", snap["url"])
if not m:
print(snap["url"])
assert False
assert m.group(1) == snap["timestamp"]
assert m.group(2) == re.sub(r"^https://", "//", url)
comment_latest = feeds.parse_timestamp(post["comments"][-1]["updated"])
archive_latest = datetime(*[int(ts.group(i)) for i in range(1, 7)], tzinfo=timezone.utc)
if archive_latest - comment_latest < timedelta(days=3):
print("WARNING: archive is recent:", (archive_latest - comment_latest))
except:
sys.stdout.write("WARNING: EXCEPTION RAISED: ")
traceback.print_exc(file=sys.stdout)
| mit |
lokeshjindal15/pd-gem5 | src/cpu/DummyChecker.py | 69 | 2259 | # Copyright (c) 2010-2011 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Geoffrey Blake
from m5.params import *
from CheckerCPU import CheckerCPU
class DummyChecker(CheckerCPU):
type = 'DummyChecker'
cxx_header = 'cpu/dummy_checker.hh'
| bsd-3-clause |
JianyuWang/nova | nova/scheduler/filters/metrics_filter.py | 16 | 2007 | # Copyright (c) 2014 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.scheduler import filters
from nova.scheduler import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('weight_setting',
'nova.scheduler.weights.metrics',
group='metrics')
class MetricsFilter(filters.BaseHostFilter):
"""Metrics Filter
This filter is used to filter out those hosts which don't have the
corresponding metrics, so that the metrics weigher won't fail due to
these hosts.
"""
def __init__(self):
super(MetricsFilter, self).__init__()
opts = utils.parse_options(CONF.metrics.weight_setting,
sep='=',
converter=float,
name="metrics.weight_setting")
self.keys = set([x[0] for x in opts])
def host_passes(self, host_state, filter_properties):
metrics_on_host = set(m.name for m in host_state.metrics)
if not self.keys.issubset(metrics_on_host):
unavail = metrics_on_host - self.keys
LOG.debug("%(host_state)s does not have the following "
"metrics: %(metrics)s",
{'host_state': host_state,
'metrics': ', '.join(unavail)})
return False
return True
| apache-2.0 |
duongbaoduy/gtools | third_party/coverage/__init__.py | 208 | 4505 | """Code coverage measurement for Python.
Ned Batchelder
http://nedbatchelder.com/code/coverage
"""
from coverage.version import __version__, __url__
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability, so the current api uses
# explicitly-created coverage objects. But for backward compatibility, here we
# define the top-level functions to create the singleton when they are first
# called.
# Singleton object for use with module-level functions. The singleton is
# created as needed when one of the module-level functions is called.
_the_coverage = None
def _singleton_method(name):
"""Return a function to the `name` method on a singleton `coverage` object.
The singleton object is created the first time one of these functions is
called.
"""
# Disable pylint msg W0612, because a bunch of variables look unused, but
# they're accessed via locals().
# pylint: disable=W0612
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
import inspect
meth = getattr(coverage, name)
args, varargs, kw, defaults = inspect.getargspec(meth)
argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
docstring = meth.__doc__
wrapper.__doc__ = ("""\
A first-use-singleton wrapper around coverage.%(name)s.
This wrapper is provided for backward compatibility with legacy code.
New code should use coverage.%(name)s directly.
%(name)s%(argspec)s:
%(docstring)s
""" % locals()
)
return wrapper
# Define the module-level functions.
use_cache = _singleton_method('use_cache')
start = _singleton_method('start')
stop = _singleton_method('stop')
erase = _singleton_method('erase')
exclude = _singleton_method('exclude')
analysis = _singleton_method('analysis')
analysis2 = _singleton_method('analysis2')
report = _singleton_method('report')
annotate = _singleton_method('annotate')
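# Typical legacy use of this module-level API (new code should create a
# coverage.coverage() instance directly), e.g.:
#   import coverage
#   coverage.start()
#   ...  # code under measurement
#   coverage.stop()
#   coverage.report()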
# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
import encodings.utf_8
# Because of the "from coverage.control import fooey" lines at the top of the
# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
# This makes some inspection tools (like pydoc) unable to find the class
# coverage.coverage. So remove that entry.
import sys
try:
del sys.modules['coverage.coverage']
except KeyError:
pass
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
# Copyright 2004-2013 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
| bsd-3-clause |
MridulS/sympy | sympy/__init__.py | 19 | 2176 | """SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://sympy.org
"""
from __future__ import absolute_import, print_function
from sympy.release import __version__
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, mathematica_code, octave_code, \
latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
| bsd-3-clause |
MikkCZ/kitsune | kitsune/questions/migrations/0003_auto_20150430_1304.py | 20 | 4834 | # -*- coding: utf-8 -*-
"""
Update list of locale choices in the `question.locale` and `questionlocale.locale` fields.
"""
from __future__ import unicode_literals
from django.db import models, migrations
import kitsune.sumo.models
class Migration(migrations.Migration):
dependencies = [
('questions', '0002_initial_data'),
]
operations = [
migrations.AlterField(
model_name='question',
name='locale',
field=kitsune.sumo.models.LocaleField(default=b'en-US', max_length=7, choices=[(b'af', 'Afrikaans'), (b'ar', '\u0639\u0631\u0628\u064a'), (b'az', 'Az\u0259rbaycanca'), (b'bg', '\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'), (b'bn-BD', '\u09ac\u09be\u0982\u09b2\u09be (\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6)'), (b'bn-IN', '\u09ac\u09be\u0982\u09b2\u09be (\u09ad\u09be\u09b0\u09a4)'), (b'bs', 'Bosanski'), (b'ca', 'catal\xe0'), (b'cs', '\u010ce\u0161tina'), (b'da', 'Dansk'), (b'de', 'Deutsch'), (b'ee', '\xc8\u028begbe'), (b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'en-US', 'English'), (b'es', 'Espa\xf1ol'), (b'et', 'eesti keel'), (b'eu', 'Euskara'), (b'fa', '\u0641\u0627\u0631\u0633\u06cc'), (b'fi', 'suomi'), (b'fr', 'Fran\xe7ais'), (b'fy-NL', 'Frysk'), (b'ga-IE', 'Gaeilge (\xc9ire)'), (b'gl', 'Galego'), (b'gu-IN', '\u0a97\u0ac1\u0a9c\u0ab0\u0abe\u0aa4\u0ac0'), (b'ha', '\u0647\u064e\u0631\u0652\u0634\u064e\u0646 \u0647\u064e\u0648\u0652\u0633\u064e'), (b'he', '\u05e2\u05d1\u05e8\u05d9\u05ea'), (b'hi-IN', '\u0939\u093f\u0928\u094d\u0926\u0940 (\u092d\u093e\u0930\u0924)'), (b'hr', 'Hrvatski'), (b'hu', 'Magyar'), (b'id', 'Bahasa Indonesia'), (b'ig', 'As\u1ee5s\u1ee5 Igbo'), (b'it', 'Italiano'), (b'ja', '\u65e5\u672c\u8a9e'), (b'km', '\u1781\u17d2\u1798\u17c2\u179a'), (b'kn', '\u0c95\u0ca8\u0ccd\u0ca8\u0ca1'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'ln', 'Ling\xe1la'), (b'lt', 'lietuvi\u0173 kalba'), (b'ml', '\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02'), (b'ne-NP', '\u0928\u0947\u092a\u093e\u0932\u0940'), (b'nl', 'Nederlands'), (b'no', 'Norsk'), (b'pl', 'Polski'), (b'pt-BR', 'Portugu\xeas (do Brasil)'), (b'pt-PT', 'Portugu\xeas (Europeu)'), (b'ro', 'rom\xe2n\u0103'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'si', '\u0dc3\u0dd2\u0d82\u0dc4\u0dbd'), (b'sk', 'sloven\u010dina'), (b'sl', 'sloven\u0161\u010dina'), (b'sq', 'Shqip'), (b'sr-Cyrl', '\u0421\u0440\u043f\u0441\u043a\u0438'), (b'sw', 'Kiswahili'), (b'sv', 'Svenska'), (b'ta', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd'), (b'ta-LK', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd (\u0b87\u0bb2\u0b99\u0bcd\u0b95\u0bc8)'), (b'te', '\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41'), (b'th', '\u0e44\u0e17\u0e22'), (b'tr', 'T\xfcrk\xe7e'), (b'uk', '\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'), (b'ur', '\u0627\u064f\u0631\u062f\u0648'), (b'vi', 'Ti\u1ebfng Vi\u1ec7t'), (b'wo', 'Wolof'), (b'xh', 'isiXhosa'), (b'yo', '\xe8d\xe8 Yor\xf9b\xe1'), (b'zh-CN', '\u4e2d\u6587 (\u7b80\u4f53)'), (b'zh-TW', '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)'), (b'zu', 'isiZulu')]),
preserve_default=True,
),
migrations.AlterField(
model_name='questionlocale',
name='locale',
field=kitsune.sumo.models.LocaleField(default=b'en-US', unique=True, max_length=7, choices=[(b'af', 'Afrikaans'), (b'ar', 'Arabic'), (b'az', 'Azerbaijani'), (b'bg', 'Bulgarian'), (b'bn-BD', 'Bengali (Bangladesh)'), (b'bn-IN', 'Bengali (India)'), (b'bs', 'Bosnian'), (b'ca', 'Catalan'), (b'cs', 'Czech'), (b'da', 'Danish'), (b'de', 'German'), (b'ee', 'Ewe'), (b'el', 'Greek'), (b'en-US', 'English'), (b'es', 'Spanish'), (b'et', 'Estonian'), (b'eu', 'Basque'), (b'fa', 'Persian'), (b'fi', 'Finnish'), (b'fr', 'French'), (b'fy-NL', 'Frisian'), (b'ga-IE', 'Irish (Ireland)'), (b'gl', 'Galician'), (b'gu-IN', 'Gujarati'), (b'ha', 'Hausa'), (b'he', 'Hebrew'), (b'hi-IN', 'Hindi (India)'), (b'hr', 'Croatian'), (b'hu', 'Hungarian'), (b'id', 'Indonesian'), (b'ig', 'Igbo'), (b'it', 'Italian'), (b'ja', 'Japanese'), (b'km', 'Khmer'), (b'kn', 'Kannada'), (b'ko', 'Korean'), (b'ln', 'Lingala'), (b'lt', 'Lithuanian'), (b'ml', 'Malayalam'), (b'ne-NP', 'Nepali'), (b'nl', 'Dutch'), (b'no', 'Norwegian'), (b'pl', 'Polish'), (b'pt-BR', 'Portuguese (Brazilian)'), (b'pt-PT', 'Portuguese (Portugal)'), (b'ro', 'Romanian'), (b'ru', 'Russian'), (b'si', 'Sinhala'), (b'sk', 'Slovak'), (b'sl', 'Slovenian'), (b'sq', 'Albanian'), (b'sr-Cyrl', 'Serbian'), (b'sw', 'Swahili'), (b'sv', 'Swedish'), (b'ta', 'Tamil'), (b'ta-LK', 'Tamil (Sri Lanka)'), (b'te', 'Telugu'), (b'th', 'Thai'), (b'tr', 'Turkish'), (b'uk', 'Ukrainian'), (b'ur', 'Urdu'), (b'vi', 'Vietnamese'), (b'wo', 'Wolof'), (b'xh', 'Xhosa'), (b'yo', 'Yoruba'), (b'zh-CN', 'Chinese (Simplified)'), (b'zh-TW', 'Chinese (Traditional)'), (b'zu', 'Zulu')]),
preserve_default=True,
),
]
| bsd-3-clause |
kaoru6/asterisk | rest-api-templates/odict.py | 14 | 8918 | # Downloaded from http://code.activestate.com/recipes/576693/
# Licensed under the MIT License
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
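# Illustrative layout (added note, not part of the original comments): after
# od['a'] = 1 and od['b'] = 2, the sentinel and links look like
# root = [link_b, link_a, None], link_a = [root, link_b, 'a'],
# link_b = [link_a, root, 'b'].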
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| gpl-2.0 |
mkreider/cocotb2 | cocotb/memdebug.py | 6 | 1833 | ''' Copyright (c) 2013 Potential Ventures Ltd
Copyright (c) 2013 SolarFlare Communications Inc
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Potential Ventures Ltd,
SolarFlare Communications Inc nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''
import cherrypy
import dowser
def start(port):
cherrypy.tree.mount(dowser.Root())
cherrypy.config.update({
'environment': 'embedded',
'server.socket_port': port
})
cherrypy.engine.start()
| bsd-3-clause |
nathanial/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/admin/templatetags/admin_modify.py | 157 | 1913 | from django import template
register = template.Library()
def prepopulated_fields_js(context):
"""
Creates a list of prepopulated_fields that should render Javascript for
the prepopulated fields for both the admin form and inlines.
"""
prepopulated_fields = []
if context['add'] and 'adminform' in context:
prepopulated_fields.extend(context['adminform'].prepopulated_fields)
if 'inline_admin_formsets' in context:
for inline_admin_formset in context['inline_admin_formsets']:
for inline_admin_form in inline_admin_formset:
if inline_admin_form.original is None:
prepopulated_fields.extend(inline_admin_form.prepopulated_fields)
context.update({'prepopulated_fields': prepopulated_fields})
return context
prepopulated_fields_js = register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)(prepopulated_fields_js)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True
}
submit_row = register.inclusion_tag('admin/submit_line.html', takes_context=True)(submit_row)
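# Template usage (as in Django's own admin change_form template), e.g.:
# {% load admin_modify %}
# {% prepopulated_fields_js %} ... {% submit_row %}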
| gpl-3.0 |
ge0rgi/cinder | cinder/volume/drivers/hpe/hpe_3par_common.py | 1 | 164598 | # (c) Copyright 2012-2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver common utilities for HPE 3PAR Storage array
The 3PAR drivers require 3.1.3 firmware on the 3PAR array.
You will need to install the python hpe3parclient module.
sudo pip install python-3parclient
The drivers use both the REST service and the SSH
command line to operate correctly. Since the
SSH credentials and the REST credentials can be different,
we need to have settings for both.
The drivers requires the use of the san_ip, san_login,
san_password settings for ssh connections into the 3PAR
array. It also requires the setting of
hpe3par_api_url, hpe3par_username, hpe3par_password
for credentials to talk to the REST service on the 3PAR
array.
"""
import ast
import json
import math
import pprint
import re
import six
import uuid
from oslo_serialization import base64
from oslo_utils import importutils
hpe3parclient = importutils.try_import("hpe3parclient")
if hpe3parclient:
from hpe3parclient import client
from hpe3parclient import exceptions as hpeexceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.objects import fields
from cinder.volume import qos_specs
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
import taskflow.engines
from taskflow.patterns import linear_flow
LOG = logging.getLogger(__name__)
MIN_CLIENT_VERSION = '4.2.0'
DEDUP_API_VERSION = 30201120
FLASH_CACHE_API_VERSION = 30201200
SRSTATLD_API_VERSION = 30201200
REMOTE_COPY_API_VERSION = 30202290
hpe3par_opts = [
cfg.StrOpt('hpe3par_api_url',
default='',
help="3PAR WSAPI Server Url like "
"https://<3par ip>:8080/api/v1",
deprecated_name='hp3par_api_url'),
cfg.StrOpt('hpe3par_username',
default='',
help="3PAR username with the 'edit' role",
deprecated_name='hp3par_username'),
cfg.StrOpt('hpe3par_password',
default='',
help="3PAR password for the user specified in hpe3par_username",
secret=True,
deprecated_name='hp3par_password'),
cfg.ListOpt('hpe3par_cpg',
default=["OpenStack"],
help="List of the CPG(s) to use for volume creation",
deprecated_name='hp3par_cpg'),
cfg.StrOpt('hpe3par_cpg_snap',
default="",
help="The CPG to use for Snapshots for volumes. "
"If empty the userCPG will be used.",
deprecated_name='hp3par_cpg_snap'),
cfg.StrOpt('hpe3par_snapshot_retention',
default="",
help="The time in hours to retain a snapshot. "
"You can't delete it before this expires.",
deprecated_name='hp3par_snapshot_retention'),
cfg.StrOpt('hpe3par_snapshot_expiration',
default="",
help="The time in hours when a snapshot expires "
" and is deleted. This must be larger than expiration",
deprecated_name='hp3par_snapshot_expiration'),
cfg.BoolOpt('hpe3par_debug',
default=False,
help="Enable HTTP debugging to 3PAR",
deprecated_name='hp3par_debug'),
cfg.ListOpt('hpe3par_iscsi_ips',
default=[],
help="List of target iSCSI addresses to use.",
deprecated_name='hp3par_iscsi_ips'),
cfg.BoolOpt('hpe3par_iscsi_chap_enabled',
default=False,
help="Enable CHAP authentication for iSCSI connections.",
deprecated_name='hp3par_iscsi_chap_enabled'),
]
CONF = cfg.CONF
CONF.register_opts(hpe3par_opts)
# Input/output (total read/write) operations per second.
THROUGHPUT = 'throughput'
# Data processed (total read/write) per unit time: kilobytes per second.
BANDWIDTH = 'bandwidth'
# Response time (total read/write): microseconds.
LATENCY = 'latency'
# IO size (total read/write): kilobytes.
IO_SIZE = 'io_size'
# Queue length for processing IO requests
QUEUE_LENGTH = 'queue_length'
# Average busy percentage
AVG_BUSY_PERC = 'avg_busy_perc'
class HPE3PARCommon(object):
"""Class that contains common code for the 3PAR drivers.
Version history:
.. code-block:: none
1.2.0 - Updated hp3parclient API use to 2.0.x
1.2.1 - Check that the VVS exists
1.2.2 - log prior to raising exceptions
1.2.3 - Methods to update key/value pair bug #1258033
1.2.4 - Remove deprecated config option hp3par_domain
1.2.5 - Raise Ex when deleting snapshot with dependencies bug #1250249
1.2.6 - Allow optional specifying n:s:p for vlun creation bug #1269515
This update now requires 3.1.2 MU3 firmware
1.3.0 - Removed all SSH code. We rely on the hp3parclient now.
2.0.0 - Update hp3parclient API uses 3.0.x
2.0.1 - Updated to use qos_specs, added new qos settings and personas
2.0.2 - Add back-end assisted volume migrate
2.0.3 - Allow deleting missing snapshots bug #1283233
2.0.4 - Allow volumes created from snapshots to be larger bug #1279478
2.0.5 - Fix extend volume units bug #1284368
2.0.6 - use loopingcall.wait instead of time.sleep
2.0.7 - Allow extend volume based on snapshot bug #1285906
2.0.8 - Fix detach issue for multiple hosts bug #1288927
2.0.9 - Remove unused 3PAR driver method bug #1310807
2.0.10 - Fixed an issue with 3PAR vlun location bug #1315542
2.0.11 - Remove hp3parclient requirement from unit tests #1315195
2.0.12 - Volume detach hangs when host is in a host set bug #1317134
2.0.13 - Added support for managing/unmanaging of volumes
2.0.14 - Modified manage volume to use standard 'source-name' element.
2.0.15 - Added support for volume retype
2.0.16 - Add a better log during delete_volume time. Bug #1349636
2.0.17 - Added iSCSI CHAP support
This update now requires 3.1.3 MU1 firmware
and hp3parclient 3.1.0
2.0.18 - HP 3PAR manage_existing with volume-type support
2.0.19 - Update default persona from Generic to Generic-ALUA
2.0.20 - Configurable SSH missing key policy and known hosts file
2.0.21 - Remove bogus invalid snapCPG=None exception
2.0.22 - HP 3PAR drivers should not claim to have 'infinite' space
2.0.23 - Increase the hostname size from 23 to 31 Bug #1371242
2.0.24 - Add pools (hp3par_cpg now accepts a list of CPGs)
2.0.25 - Migrate without losing type settings bug #1356608
2.0.26 - Don't ignore extra-specs snap_cpg when missing cpg #1368972
2.0.27 - Fixing manage source-id error bug #1357075
2.0.28 - Removing locks bug #1381190
2.0.29 - Report a limitless cpg's stats better bug #1398651
2.0.30 - Update the minimum hp3parclient version bug #1402115
2.0.31 - Removed usage of host name cache #1398914
2.0.32 - Update LOG usage to fix translations. bug #1384312
2.0.33 - Fix host persona to match WSAPI mapping bug #1403997
2.0.34 - Fix log messages to match guidelines. bug #1411370
2.0.35 - Fix default snapCPG for manage_existing bug #1393609
2.0.36 - Added support for dedup provisioning
2.0.37 - Added support for enabling Flash Cache
2.0.38 - Add stats for hp3par goodness_function and filter_function
2.0.39 - Added support for updated detach_volume attachment.
2.0.40 - Make the 3PAR drivers honor the pool in create bug #1432876
2.0.41 - Only log versions at startup. bug #1447697
2.0.42 - Fix type for snapshot config settings. bug #1461640
2.0.43 - Report the capability of supporting multiattach
2.0.44 - Update help strings to reduce the 3PAR user role requirements
2.0.45 - Python 3 fixes
2.0.46 - Improved VLUN creation and deletion logic. #1469816
2.0.47 - Changed initialize_connection to use getHostVLUNs. #1475064
2.0.48 - Adding changes to support 3PAR iSCSI multipath.
2.0.49 - Added client CPG stats to driver volume stats. bug #1482741
2.0.50 - Add over subscription support
2.0.51 - Adds consistency group support
2.0.52 - Added update_migrated_volume. bug #1492023
2.0.53 - Fix volume size conversion. bug #1513158
3.0.0 - Rebranded HP to HPE.
3.0.1 - Fixed find_existing_vluns bug #1515033
3.0.2 - Python 3 support
3.0.3 - Remove db access for consistency groups
3.0.4 - Adds v2 managed replication support
3.0.5 - Adds v2 unmanaged replication support
3.0.6 - Adding manage/unmanage snapshot support
3.0.7 - Enable standard capabilities based on 3PAR licenses
3.0.8 - Optimize array ID retrieval
3.0.9 - Bump minimum API version for volume replication
3.0.10 - Added additional volumes checks to the manage snapshot API
3.0.11 - Fix the image cache capability bug #1491088
3.0.12 - Remove client version checks for replication
3.0.13 - Support creating a cg from a source cg
3.0.14 - Comparison of WWNs now handles case difference. bug #1546453
3.0.15 - Update replication to version 2.1
3.0.16 - Use same LUN ID for each VLUN path #1551994
3.0.17 - Don't fail on clearing 3PAR object volume key. bug #1546392
3.0.18 - create_cloned_volume account for larger size. bug #1554740
3.0.19 - Remove metadata that tracks the instance ID. bug #1572665
3.0.20 - Fix lun_id of 0 issue. bug #1573298
3.0.21 - Driver no longer fails to initialize if
System Reporter license is missing. bug #1568078
3.0.22 - Rework delete_vlun. Bug #1582922
3.0.23 - Fix CG create failures with long display name or special
characters. bug #1573647
3.0.24 - Fix terminate connection on failover
3.0.25 - Fix delete volume when online clone is active. bug #1349639
3.0.26 - Fix concurrent snapshot delete conflict. bug #1600104
3.0.27 - Fix snapCPG error during backup of attached volume.
Bug #1646396. Also fix backup of attached iSCSI and
CHAP enabled volume. Bug #1644238.
3.0.28 - Remove unnecessary snapshot creation of source volume
while doing online copy in create_cloned_volume call.
Bug #1661541
3.0.29 - Fix convert snapshot volume to base volume type. bug #1656186
"""
VERSION = "3.0.29"
stats = {}
# TODO(Ramy): move these to the 3PAR Client
VLUN_TYPE_EMPTY = 1
VLUN_TYPE_PORT = 2
VLUN_TYPE_HOST = 3
VLUN_TYPE_MATCHED_SET = 4
VLUN_TYPE_HOST_SET = 5
THIN = 2
DEDUP = 6
CONVERT_TO_THIN = 1
CONVERT_TO_FULL = 2
CONVERT_TO_DEDUP = 3
# v2 replication constants
SYNC = 1
PERIODIC = 2
EXTRA_SPEC_REP_MODE = "replication:mode"
EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period"
RC_ACTION_CHANGE_TO_PRIMARY = 7
DEFAULT_REP_MODE = 'periodic'
DEFAULT_SYNC_PERIOD = 900
RC_GROUP_STARTED = 3
SYNC_STATUS_COMPLETED = 3
FAILBACK_VALUE = 'default'
# License values for reported capabilities
PRIORITY_OPT_LIC = "Priority Optimization"
THIN_PROV_LIC = "Thin Provisioning"
REMOTE_COPY_LIC = "Remote Copy"
SYSTEM_REPORTER_LIC = "System Reporter"
# Valid values for volume type extra specs
# The first value in the list is the default value
valid_prov_values = ['thin', 'full', 'dedup']
valid_persona_values = ['2 - Generic-ALUA',
'1 - Generic',
'3 - Generic-legacy',
'4 - HPUX-legacy',
'5 - AIX-legacy',
'6 - EGENERA',
'7 - ONTAP-legacy',
'8 - VMware',
'9 - OpenVMS',
'10 - HPUX',
'11 - WindowsServer']
hpe_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency',
'priority']
qos_priority_level = {'low': 1, 'normal': 2, 'high': 3}
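# Illustrative QoS specs consumed by this driver (keys from hpe_qos_keys
# above; the values are made up):
# {'maxIOPS': '1000', 'maxBWS': '200', 'latency': '10', 'priority': 'high'}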
hpe3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs',
'flash_cache']
def __init__(self, config, active_backend_id=None):
self.config = config
self.client = None
self.uuid = uuid.uuid4()
self._client_conf = {}
self._replication_targets = []
self._replication_enabled = False
self._active_backend_id = active_backend_id
def get_version(self):
return self.VERSION
def check_flags(self, options, required_flags):
for flag in required_flags:
if not getattr(options, flag, None):
msg = _('%s is not set') % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def check_replication_flags(self, options, required_flags):
for flag in required_flags:
if not options.get(flag, None):
msg = (_('%s is not set and is required for the replication '
'device to be valid.') % flag)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
def _create_client(self, timeout=None):
hpe3par_api_url = self._client_conf['hpe3par_api_url']
cl = client.HPE3ParClient(hpe3par_api_url, timeout=timeout)
client_version = hpe3parclient.version
if client_version < MIN_CLIENT_VERSION:
ex_msg = (_('Invalid hpe3parclient version found (%(found)s). '
'Version %(minimum)s or greater required. Run "pip'
' install --upgrade python-3parclient" to upgrade'
' the hpe3parclient.')
% {'found': client_version,
'minimum': MIN_CLIENT_VERSION})
LOG.error(ex_msg)
raise exception.InvalidInput(reason=ex_msg)
return cl
def client_login(self):
try:
LOG.debug("Connecting to 3PAR")
self.client.login(self._client_conf['hpe3par_username'],
self._client_conf['hpe3par_password'])
except hpeexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
{'url': self._client_conf['hpe3par_api_url'], 'err': ex})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
known_hosts_file = CONF.ssh_hosts_key_file
policy = "AutoAddPolicy"
if CONF.strict_ssh_host_key_policy:
policy = "RejectPolicy"
self.client.setSSHOptions(
self._client_conf['san_ip'],
self._client_conf['san_login'],
self._client_conf['san_password'],
port=self._client_conf['san_ssh_port'],
conn_timeout=self._client_conf['ssh_conn_timeout'],
privatekey=self._client_conf['san_private_key'],
missing_key_policy=policy,
known_hosts_file=known_hosts_file)
def client_logout(self):
LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid)
self.client.logout()
def _create_replication_client(self, remote_array):
try:
cl = client.HPE3ParClient(remote_array['hpe3par_api_url'])
cl.login(remote_array['hpe3par_username'],
remote_array['hpe3par_password'])
except hpeexceptions.HTTPUnauthorized as ex:
msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") %
{'url': remote_array['hpe3par_api_url'], 'err': ex})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
known_hosts_file = CONF.ssh_hosts_key_file
policy = "AutoAddPolicy"
if CONF.strict_ssh_host_key_policy:
policy = "RejectPolicy"
cl.setSSHOptions(
remote_array['san_ip'],
remote_array['san_login'],
remote_array['san_password'],
port=remote_array['san_ssh_port'],
conn_timeout=remote_array['ssh_conn_timeout'],
privatekey=remote_array['san_private_key'],
missing_key_policy=policy,
known_hosts_file=known_hosts_file)
return cl
def _destroy_replication_client(self, client):
if client is not None:
client.logout()
def do_setup(self, context, timeout=None, stats=None):
if hpe3parclient is None:
msg = _('You must install hpe3parclient before using 3PAR'
' drivers. Run "pip install python-3parclient" to'
' install the hpe3parclient.')
raise exception.VolumeBackendAPIException(data=msg)
try:
# This will set self._client_conf with the proper credentials
# to communicate with the 3PAR array. It will contain either
# the values for the primary array or secondary array in the
# case of a fail-over.
self._get_3par_config()
self.client = self._create_client(timeout=timeout)
wsapi_version = self.client.getWsApiVersion()
self.API_VERSION = wsapi_version['build']
# If replication is properly configured, the primary array's
# API version must meet the minimum requirements.
if self._replication_enabled and (
self.API_VERSION < REMOTE_COPY_API_VERSION):
self._replication_enabled = False
msg = (_LE("The primary array must have an API version of "
"%(min_ver)s or higher, but is only on "
"%(current_ver)s, therefore replication is not "
"supported.") %
{'min_ver': REMOTE_COPY_API_VERSION,
'current_ver': self.API_VERSION})
LOG.error(msg)
except hpeexceptions.UnsupportedVersion as ex:
# In the event we cannot contact the configured primary array,
# we want to allow a failover if replication is enabled.
self._do_replication_setup()
if self._replication_enabled:
self.client = None
raise exception.InvalidInput(ex)
if context:
# The context is None except at driver startup.
LOG.info(_LI("HPE3PARCommon %(common_ver)s,"
"hpe3parclient %(rest_ver)s"),
{"common_ver": self.VERSION,
"rest_ver": hpe3parclient.get_version_string()})
if self.config.hpe3par_debug:
self.client.debug_rest(True)
if self.API_VERSION < SRSTATLD_API_VERSION:
# Firmware version not compatible with srstatld
LOG.warning(_LW("srstatld requires "
"WSAPI version '%(srstatld_version)s' "
"version '%(version)s' is installed.") %
{'srstatld_version': SRSTATLD_API_VERSION,
'version': self.API_VERSION})
# Get the client ID for provider_location. We only need to retrieve
# the ID directly from the array if the driver stats are not provided.
if not stats:
try:
self.client_login()
info = self.client.getStorageSystemInfo()
self.client.id = six.text_type(info['id'])
except Exception:
self.client.id = 0
finally:
self.client_logout()
else:
self.client.id = stats['array_id']
def check_for_setup_error(self):
if self.client:
self.client_login()
try:
cpg_names = self._client_conf['hpe3par_cpg']
for cpg_name in cpg_names:
self.validate_cpg(cpg_name)
finally:
self.client_logout()
def validate_cpg(self, cpg_name):
try:
self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
def get_domain(self, cpg_name):
try:
cpg = self.client.getCPG(cpg_name)
except hpeexceptions.HTTPNotFound:
err = (_("Failed to get domain because CPG (%s) doesn't "
"exist on array.") % cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
if 'domain' in cpg:
return cpg['domain']
return None
def extend_volume(self, volume, new_size):
volume_name = self._get_3par_vol_name(volume['id'])
old_size = volume['size']
growth_size = int(new_size) - old_size
LOG.debug("Extending Volume %(vol)s from %(old)s to %(new)s, "
" by %(diff)s GB.",
{'vol': volume_name, 'old': old_size, 'new': new_size,
'diff': growth_size})
growth_size_mib = growth_size * units.Ki
self._extend_volume(volume, volume_name, growth_size_mib)
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
pool = volume_utils.extract_host(group.host, level='pool')
domain = self.get_domain(pool)
cg_name = self._get_3par_vvs_name(group.id)
extra = {'consistency_group_id': group.id}
if group.cgsnapshot_id:
extra['cgsnapshot_id'] = group.cgsnapshot_id
self.client.createVolumeSet(cg_name, domain=domain,
comment=six.text_type(extra))
model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE}
return model_update
def create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot=None, snapshots=None,
source_cg=None, source_vols=None):
self.create_consistencygroup(context, group)
vvs_name = self._get_3par_vvs_name(group.id)
if cgsnapshot and snapshots:
cgsnap_name = self._get_3par_snap_name(cgsnapshot.id)
snap_base = cgsnap_name
elif source_cg and source_vols:
cg_id = source_cg.id
# Create a brand new uuid for the temp snap.
snap_uuid = uuid.uuid4().hex
# Create a temporary snapshot of the volume set in order to
# perform an online copy. These temp snapshots will be deleted
# when the source consistency group is deleted.
temp_snap = self._get_3par_snap_name(snap_uuid, temp_snap=True)
snap_shot_name = temp_snap + "-@count@"
copy_of_name = self._get_3par_vvs_name(cg_id)
optional = {'expirationHours': 1}
self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name,
optional=optional)
snap_base = temp_snap
for i, volume in enumerate(volumes):
snap_name = snap_base + "-" + six.text_type(i)
volume_name = self._get_3par_vol_name(volume['id'])
type_info = self.get_volume_settings_from_type(volume)
cpg = type_info['cpg']
tpvv = type_info.get('tpvv', False)
tdvv = type_info.get('tdvv', False)
optional = {'online': True, 'snapCPG': cpg,
'tpvv': tpvv, 'tdvv': tdvv}
self.client.copyVolume(snap_name, volume_name, cpg, optional)
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
return None, None
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
try:
cg_name = self._get_3par_vvs_name(group.id)
self.client.deleteVolumeSet(cg_name)
except hpeexceptions.HTTPNotFound:
err = (_LW("Virtual Volume Set '%s' doesn't exist on array.") %
cg_name)
LOG.warning(err)
except hpeexceptions.HTTPConflict as e:
err = (_LE("Conflict detected in Virtual Volume Set"
" %(volume_set)s: %(error)s"))
LOG.error(err,
{"volume_set": cg_name,
"error": e})
volume_model_updates = []
for volume in volumes:
volume_update = {'id': volume.id}
try:
self.delete_volume(volume)
volume_update['status'] = 'deleted'
except Exception as ex:
LOG.error(_LE("There was an error deleting volume %(id)s: "
"%(error)s."),
{'id': volume.id,
'error': six.text_type(ex)})
volume_update['status'] = 'error'
volume_model_updates.append(volume_update)
model_update = {'status': group.status}
return model_update, volume_model_updates
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
volume_set_name = self._get_3par_vvs_name(group.id)
for volume in add_volumes:
volume_name = self._get_3par_vol_name(volume['id'])
try:
self.client.addVolumeToVolumeSet(volume_set_name, volume_name)
except hpeexceptions.HTTPNotFound:
msg = (_LE('Virtual Volume Set %s does not exist.') %
volume_set_name)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
for volume in remove_volumes:
volume_name = self._get_3par_vol_name(volume['id'])
try:
self.client.removeVolumeFromVolumeSet(
volume_set_name, volume_name)
except hpeexceptions.HTTPNotFound:
msg = (_LE('Virtual Volume Set %s does not exist.') %
volume_set_name)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return None, None, None
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
cg_id = cgsnapshot.consistencygroup_id
snap_shot_name = self._get_3par_snap_name(cgsnapshot.id) + (
"-@count@")
copy_of_name = self._get_3par_vvs_name(cg_id)
extra = {'cgsnapshot_id': cgsnapshot.id}
extra['consistency_group_id'] = cg_id
extra['description'] = cgsnapshot.description
optional = {'comment': json.dumps(extra),
'readOnly': False}
if self.config.hpe3par_snapshot_expiration:
optional['expirationHours'] = (
int(self.config.hpe3par_snapshot_expiration))
if self.config.hpe3par_snapshot_retention:
optional['retentionHours'] = (
int(self.config.hpe3par_snapshot_retention))
try:
self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name,
optional=optional)
except Exception as ex:
msg = (_('There was an error creating the cgsnapshot: %s'),
six.text_type(ex))
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_update = {'id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE}
snapshot_model_updates.append(snapshot_update)
model_update = {'status': 'available'}
return model_update, snapshot_model_updates
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
cgsnap_name = self._get_3par_snap_name(cgsnapshot.id)
snapshot_model_updates = []
for i, snapshot in enumerate(snapshots):
snapshot_update = {'id': snapshot['id']}
try:
snap_name = cgsnap_name + "-" + six.text_type(i)
self.client.deleteVolume(snap_name)
snapshot_update['status'] = fields.SnapshotStatus.DELETED
except hpeexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
LOG.warning(_LW("Delete Snapshot id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s"),
{'id': snapshot['id'], 'msg': ex})
snapshot_update['status'] = fields.SnapshotStatus.ERROR
except Exception as ex:
LOG.error(_LE("There was an error deleting snapshot %(id)s: "
"%(error)s."),
{'id': snapshot['id'],
'error': six.text_type(ex)})
snapshot_update['status'] = fields.SnapshotStatus.ERROR
snapshot_model_updates.append(snapshot_update)
model_update = {'status': cgsnapshot.status}
return model_update, snapshot_model_updates
def manage_existing(self, volume, existing_ref):
"""Manage an existing 3PAR volume.
existing_ref is a dictionary of the form:
{'source-name': <name of the virtual volume>}
"""
target_vol_name = self._get_existing_volume_ref_name(existing_ref)
# Check for the existence of the virtual volume.
old_comment_str = ""
try:
vol = self.client.getVolume(target_vol_name)
if 'comment' in vol:
old_comment_str = vol['comment']
except hpeexceptions.HTTPNotFound:
err = (_("Virtual volume '%s' doesn't exist on array.") %
target_vol_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
new_comment = {}
# Use the display name from the existing volume if no new name
# was chosen by the user.
if volume['display_name']:
display_name = volume['display_name']
new_comment['display_name'] = volume['display_name']
elif 'comment' in vol:
display_name = self._get_3par_vol_comment_value(vol['comment'],
'display_name')
if display_name:
new_comment['display_name'] = display_name
else:
display_name = None
# Generate the new volume information based on the new ID.
new_vol_name = self._get_3par_vol_name(volume['id'])
name = 'volume-' + volume['id']
new_comment['volume_id'] = volume['id']
new_comment['name'] = name
new_comment['type'] = 'OpenStack'
volume_type = None
if volume['volume_type_id']:
try:
volume_type = self._get_volume_type(volume['volume_type_id'])
except Exception:
reason = (_("Volume type ID '%s' is invalid.") %
volume['volume_type_id'])
raise exception.ManageExistingVolumeTypeMismatch(reason=reason)
new_vals = {'newName': new_vol_name,
'comment': json.dumps(new_comment)}
# Ensure that snapCPG is set
if 'snapCPG' not in vol:
new_vals['snapCPG'] = vol['userCPG']
LOG.info(_LI("Virtual volume %(disp)s '%(new)s' snapCPG "
"is empty so it will be set to: %(cpg)s"),
{'disp': display_name, 'new': new_vol_name,
'cpg': new_vals['snapCPG']})
# Update the existing volume with the new name and comments.
self.client.modifyVolume(target_vol_name, new_vals)
LOG.info(_LI("Virtual volume '%(ref)s' renamed to '%(new)s'."),
{'ref': existing_ref['source-name'], 'new': new_vol_name})
retyped = False
model_update = None
if volume_type:
LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is "
"being retyped."),
{'disp': display_name, 'new': new_vol_name})
try:
retyped, model_update = self._retype_from_no_type(volume,
volume_type)
LOG.info(_LI("Virtual volume %(disp)s successfully retyped to "
"%(new_type)s."),
{'disp': display_name,
'new_type': volume_type.get('name')})
except Exception:
with excutils.save_and_reraise_exception():
LOG.warning(_LW("Failed to manage virtual volume %(disp)s "
"due to error during retype."),
{'disp': display_name})
# Try to undo the rename and clear the new comment.
self.client.modifyVolume(
new_vol_name,
{'newName': target_vol_name,
'comment': old_comment_str})
updates = {'display_name': display_name}
if retyped and model_update:
updates.update(model_update)
LOG.info(_LI("Virtual volume %(disp)s '%(new)s' is "
"now being managed."),
{'disp': display_name, 'new': new_vol_name})
# Return display name to update the name displayed in the GUI and
# any model updates from retype.
return updates
def manage_existing_snapshot(self, snapshot, existing_ref):
"""Manage an existing 3PAR snapshot.
existing_ref is a dictionary of the form:
{'source-name': <name of the snapshot>}
"""
# Potential parent volume for the snapshot
volume = snapshot['volume']
# Do not allow for managing of snapshots for 'failed-over' volumes.
if volume.get('replication_status') == 'failed-over':
err = (_("Managing of snapshots to failed-over volumes is "
"not allowed."))
raise exception.InvalidInput(reason=err)
target_snap_name = self._get_existing_volume_ref_name(existing_ref,
is_snapshot=True)
# Check for the existence of the snapshot.
try:
snap = self.client.getVolume(target_snap_name)
except hpeexceptions.HTTPNotFound:
err = (_("Snapshot '%s' doesn't exist on array.") %
target_snap_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
# Make sure the snapshot is being associated with the correct volume.
parent_vol_name = self._get_3par_vol_name(volume['id'])
if parent_vol_name != snap['copyOf']:
err = (_("The provided snapshot '%s' is not a snapshot of "
"the provided volume.") % target_snap_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
new_comment = {}
# Use the display name from the existing snapshot if no new name
# was chosen by the user.
if snapshot['display_name']:
display_name = snapshot['display_name']
new_comment['display_name'] = snapshot['display_name']
elif 'comment' in snap:
display_name = self._get_3par_vol_comment_value(snap['comment'],
'display_name')
if display_name:
new_comment['display_name'] = display_name
else:
display_name = None
# Generate the new snapshot information based on the new ID.
new_snap_name = self._get_3par_snap_name(snapshot['id'])
new_comment['volume_id'] = volume['id']
new_comment['volume_name'] = 'volume-' + volume['id']
if snapshot.get('display_description', None):
new_comment['description'] = snapshot['display_description']
else:
new_comment['description'] = ""
new_vals = {'newName': new_snap_name,
'comment': json.dumps(new_comment)}
# Update the existing snapshot with the new name and comments.
self.client.modifyVolume(target_snap_name, new_vals)
LOG.info(_LI("Snapshot '%(ref)s' renamed to '%(new)s'."),
{'ref': existing_ref['source-name'], 'new': new_snap_name})
updates = {'display_name': display_name}
LOG.info(_LI("Snapshot %(disp)s '%(new)s' is now being managed."),
{'disp': display_name, 'new': new_snap_name})
# Return display name to update the name displayed in the GUI.
return updates
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
existing_ref is a dictionary of the form:
{'source-name': <name of the virtual volume>}
"""
target_vol_name = self._get_existing_volume_ref_name(existing_ref)
# Make sure the reference is not in use.
if re.match('osv-*|oss-*|vvs-*', target_vol_name):
reason = _("Reference must be for an unmanaged virtual volume.")
raise exception.ManageExistingInvalidReference(
existing_ref=target_vol_name,
reason=reason)
# Check for the existence of the virtual volume.
try:
vol = self.client.getVolume(target_vol_name)
except hpeexceptions.HTTPNotFound:
err = (_("Virtual volume '%s' doesn't exist on array.") %
target_vol_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
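        # sizeMiB is reported by the array in MiB; dividing by units.Ki
        # (1024) and rounding up yields whole GiB for Cinder.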
return int(math.ceil(float(vol['sizeMiB']) / units.Ki))
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
"""Return size of snapshot to be managed by manage_existing_snapshot.
existing_ref is a dictionary of the form:
{'source-name': <name of the snapshot>}
"""
target_snap_name = self._get_existing_volume_ref_name(existing_ref,
is_snapshot=True)
# Make sure the reference is not in use.
if re.match('osv-*|oss-*|vvs-*|unm-*', target_snap_name):
reason = _("Reference must be for an unmanaged snapshot.")
raise exception.ManageExistingInvalidReference(
existing_ref=target_snap_name,
reason=reason)
# Check for the existence of the snapshot.
try:
snap = self.client.getVolume(target_snap_name)
except hpeexceptions.HTTPNotFound:
err = (_("Snapshot '%s' doesn't exist on array.") %
target_snap_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
return int(math.ceil(float(snap['sizeMiB']) / units.Ki))
def unmanage(self, volume):
"""Removes the specified volume from Cinder management."""
        # Rename the volume to the unm-* format so that it can be
        # easily found later.
vol_name = self._get_3par_vol_name(volume['id'])
new_vol_name = self._get_3par_unm_name(volume['id'])
self.client.modifyVolume(vol_name, {'newName': new_vol_name})
LOG.info(_LI("Virtual volume %(disp)s '%(vol)s' is no longer managed. "
"Volume renamed to '%(new)s'."),
{'disp': volume['display_name'],
'vol': vol_name,
'new': new_vol_name})
def unmanage_snapshot(self, snapshot):
"""Removes the specified snapshot from Cinder management."""
# Parent volume for the snapshot
volume = snapshot['volume']
# Do not allow unmanaging of snapshots from 'failed-over' volumes.
if volume.get('replication_status') == 'failed-over':
err = (_("Unmanaging of snapshots from failed-over volumes is "
"not allowed."))
LOG.error(err)
# TODO(leeantho) Change this exception to Invalid when the volume
# manager supports handling that.
raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
        # Rename the snapshot to the ums-* format so that it can be
        # easily found later.
snap_name = self._get_3par_snap_name(snapshot['id'])
new_snap_name = self._get_3par_ums_name(snapshot['id'])
self.client.modifyVolume(snap_name, {'newName': new_snap_name})
LOG.info(_LI("Snapshot %(disp)s '%(vol)s' is no longer managed. "
"Snapshot renamed to '%(new)s'."),
{'disp': snapshot['display_name'],
'vol': snap_name,
'new': new_snap_name})
def _get_existing_volume_ref_name(self, existing_ref, is_snapshot=False):
"""Returns the volume name of an existing reference.
Checks if an existing volume reference has a source-name or
source-id element. If source-name or source-id is not present an
error will be thrown.
"""
vol_name = None
if 'source-name' in existing_ref:
vol_name = existing_ref['source-name']
elif 'source-id' in existing_ref:
if is_snapshot:
vol_name = self._get_3par_ums_name(existing_ref['source-id'])
else:
vol_name = self._get_3par_unm_name(existing_ref['source-id'])
else:
reason = _("Reference must contain source-name or source-id.")
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref,
reason=reason)
return vol_name
def _extend_volume(self, volume, volume_name, growth_size_mib,
_convert_to_base=False):
model_update = None
rcg_name = self._get_3par_rcg_name(volume['id'])
is_volume_replicated = self._volume_of_replicated_type(volume)
try:
if _convert_to_base:
LOG.debug("Converting to base volume prior to growing.")
model_update = self._convert_to_base_volume(volume)
# If the volume is replicated and we are not failed over,
# remote copy has to be stopped before the volume can be extended.
failed_over = volume.get("replication_status", None)
is_failed_over = failed_over == "failed-over"
if is_volume_replicated and not is_failed_over:
self.client.stopRemoteCopy(rcg_name)
self.client.growVolume(volume_name, growth_size_mib)
if is_volume_replicated and not is_failed_over:
self.client.startRemoteCopy(rcg_name)
except Exception as ex:
# If the extend fails, we must restart remote copy.
if is_volume_replicated:
self.client.startRemoteCopy(rcg_name)
with excutils.save_and_reraise_exception() as ex_ctxt:
if (not _convert_to_base and
isinstance(ex, hpeexceptions.HTTPForbidden) and
ex.get_code() == 150):
# Error code 150 means 'invalid operation: Cannot grow
# this type of volume'.
# Suppress raising this exception because we can
# resolve it by converting it into a base volume.
# Afterwards, extending the volume should succeed, or
# fail with a different exception/error code.
ex_ctxt.reraise = False
model_update = self._extend_volume(
volume, volume_name,
growth_size_mib,
_convert_to_base=True)
else:
LOG.error(_LE("Error extending volume: %(vol)s. "
"Exception: %(ex)s"),
{'vol': volume_name, 'ex': ex})
return model_update
def _get_3par_vol_name(self, volume_id):
"""Get converted 3PAR volume name.
Converts the openstack volume id from
ecffc30f-98cb-4cf5-85ee-d7309cc17cd2
to
osv-7P.DD5jLTPWF7tcwnMF80g
        We convert the 128 bits of the uuid into a 24-character long
        base64 encoded string to ensure we don't exceed the maximum
        allowed 31 character name limit on 3PAR.
We strip the padding '=' and replace + with .
and / with -
"""
volume_name = self._encode_name(volume_id)
return "osv-%s" % volume_name
def _get_3par_snap_name(self, snapshot_id, temp_snap=False):
snapshot_name = self._encode_name(snapshot_id)
if temp_snap:
# is this a temporary snapshot
# this is done during cloning
prefix = "tss-%s"
else:
prefix = "oss-%s"
return prefix % snapshot_name
def _get_3par_ums_name(self, snapshot_id):
ums_name = self._encode_name(snapshot_id)
return "ums-%s" % ums_name
def _get_3par_vvs_name(self, volume_id):
vvs_name = self._encode_name(volume_id)
return "vvs-%s" % vvs_name
def _get_3par_unm_name(self, volume_id):
unm_name = self._encode_name(volume_id)
return "unm-%s" % unm_name
# v2 replication conversion
def _get_3par_rcg_name(self, volume_id):
rcg_name = self._encode_name(volume_id)
rcg = "rcg-%s" % rcg_name
return rcg[:22]
def _get_3par_remote_rcg_name(self, volume_id, provider_location):
return self._get_3par_rcg_name(volume_id) + ".r" + (
six.text_type(provider_location))
def _encode_name(self, name):
uuid_str = name.replace("-", "")
vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str)
vol_encoded = base64.encode_as_text(vol_uuid.bytes)
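        # e.g. (per the _get_3par_vol_name docstring above) the uuid
        # ecffc30f-98cb-4cf5-85ee-d7309cc17cd2 ends up encoded as
        # 7P.DD5jLTPWF7tcwnMF80g after the substitutions below.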
# 3par doesn't allow +, nor /
vol_encoded = vol_encoded.replace('+', '.')
vol_encoded = vol_encoded.replace('/', '-')
# strip off the == as 3par doesn't like those.
vol_encoded = vol_encoded.replace('=', '')
return vol_encoded
def _capacity_from_size(self, vol_size):
# because 3PAR volume sizes are in Mebibytes.
if int(vol_size) == 0:
capacity = units.Gi # default: 1GiB
else:
capacity = vol_size * units.Gi
capacity = int(math.ceil(capacity / units.Mi))
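        # e.g. a requested size of 10 (GiB) becomes
        # 10 * units.Gi / units.Mi = 10240 MiB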
return capacity
def _delete_3par_host(self, hostname):
self.client.deleteHost(hostname)
def _create_3par_vlun(self, volume, hostname, nsp, lun_id=None):
try:
location = None
auto = True
if lun_id is not None:
auto = False
if nsp is None:
location = self.client.createVLUN(volume, hostname=hostname,
auto=auto, lun=lun_id)
else:
port = self.build_portPos(nsp)
location = self.client.createVLUN(volume, hostname=hostname,
auto=auto, portPos=port,
lun=lun_id)
vlun_info = None
if location:
# The LUN id is returned as part of the location URI
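                # The code below assumes location is a comma separated
                # string of the form:
                #   <volume_name>,<lun_id>,<host_name>[,<nsp>]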
vlun = location.split(',')
vlun_info = {'volume_name': vlun[0],
'lun_id': int(vlun[1]),
'host_name': vlun[2],
}
if len(vlun) > 3:
vlun_info['nsp'] = vlun[3]
return vlun_info
except hpeexceptions.HTTPBadRequest as e:
if 'must be in the same domain' in e.get_description():
LOG.error(e.get_description())
raise exception.Invalid3PARDomain(err=e.get_description())
else:
raise exception.VolumeBackendAPIException(
data=e.get_description())
def _safe_hostname(self, hostname):
"""We have to use a safe hostname length for 3PAR host names."""
try:
index = hostname.index('.')
except ValueError:
# couldn't find it
index = len(hostname)
# we'll just chop this off for now.
if index > 31:
index = 31
return hostname[:index]
def _get_3par_host(self, hostname):
return self.client.getHost(hostname)
def get_ports(self):
return self.client.getPorts()
def get_active_target_ports(self):
ports = self.get_ports()
target_ports = []
for port in ports['members']:
if (
port['mode'] == self.client.PORT_MODE_TARGET and
port['linkState'] == self.client.PORT_STATE_READY
):
port['nsp'] = self.build_nsp(port['portPos'])
target_ports.append(port)
return target_ports
def get_active_fc_target_ports(self):
ports = self.get_active_target_ports()
fc_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_FC:
fc_ports.append(port)
return fc_ports
def get_active_iscsi_target_ports(self):
ports = self.get_active_target_ports()
iscsi_ports = []
for port in ports:
if port['protocol'] == self.client.PORT_PROTO_ISCSI:
iscsi_ports.append(port)
return iscsi_ports
def get_volume_stats(self,
refresh,
filter_function=None,
goodness_function=None):
if refresh:
self._update_volume_stats(
filter_function=filter_function,
goodness_function=goodness_function)
return self.stats
def _update_volume_stats(self,
filter_function=None,
goodness_function=None):
        # const to convert MiB to GiB (1 / 1024)
        const = 0.0009765625
# storage_protocol and volume_backend_name are
# set in the child classes
pools = []
info = self.client.getStorageSystemInfo()
qos_support = True
thin_support = True
remotecopy_support = True
sr_support = True
if 'licenseInfo' in info:
if 'licenses' in info['licenseInfo']:
valid_licenses = info['licenseInfo']['licenses']
qos_support = self._check_license_enabled(
valid_licenses, self.PRIORITY_OPT_LIC,
"QoS_support")
thin_support = self._check_license_enabled(
valid_licenses, self.THIN_PROV_LIC,
"Thin_provisioning_support")
remotecopy_support = self._check_license_enabled(
valid_licenses, self.REMOTE_COPY_LIC,
"Replication")
sr_support = self._check_license_enabled(
valid_licenses, self.SYSTEM_REPORTER_LIC,
"System_reporter_support")
for cpg_name in self._client_conf['hpe3par_cpg']:
try:
stat_capabilities = {
THROUGHPUT: None,
BANDWIDTH: None,
LATENCY: None,
IO_SIZE: None,
QUEUE_LENGTH: None,
AVG_BUSY_PERC: None
}
cpg = self.client.getCPG(cpg_name)
if (self.API_VERSION >= SRSTATLD_API_VERSION and sr_support):
interval = 'daily'
history = '7d'
try:
stat_capabilities = self.client.getCPGStatData(
cpg_name,
interval,
history)
except Exception as ex:
LOG.warning(_LW("Exception at getCPGStatData() "
"for cpg: '%(cpg_name)s' "
"Reason: '%(reason)s'") %
{'cpg_name': cpg_name, 'reason': ex})
if 'numTDVVs' in cpg:
total_volumes = int(
cpg['numFPVVs'] + cpg['numTPVVs'] + cpg['numTDVVs']
)
else:
total_volumes = int(
cpg['numFPVVs'] + cpg['numTPVVs']
)
if 'limitMiB' not in cpg['SDGrowth']:
# cpg usable free space
cpg_avail_space = (
self.client.getCPGAvailableSpace(cpg_name))
free_capacity = int(
cpg_avail_space['usableFreeMiB'] * const)
# total_capacity is the best we can do for a limitless cpg
total_capacity = int(
(cpg['SDUsage']['usedMiB'] +
cpg['UsrUsage']['usedMiB'] +
cpg_avail_space['usableFreeMiB']) * const)
else:
total_capacity = int(cpg['SDGrowth']['limitMiB'] * const)
free_capacity = int((cpg['SDGrowth']['limitMiB'] -
(cpg['UsrUsage']['usedMiB'] +
cpg['SDUsage']['usedMiB'])) * const)
capacity_utilization = (
(float(total_capacity - free_capacity) /
float(total_capacity)) * 100)
provisioned_capacity = int((cpg['UsrUsage']['totalMiB'] +
cpg['SAUsage']['totalMiB'] +
cpg['SDUsage']['totalMiB']) *
const)
except hpeexceptions.HTTPNotFound:
err = (_("CPG (%s) doesn't exist on array")
% cpg_name)
LOG.error(err)
raise exception.InvalidInput(reason=err)
pool = {'pool_name': cpg_name,
'total_capacity_gb': total_capacity,
'free_capacity_gb': free_capacity,
'provisioned_capacity_gb': provisioned_capacity,
'QoS_support': qos_support,
'thin_provisioning_support': thin_support,
'thick_provisioning_support': True,
'max_over_subscription_ratio': (
self.config.safe_get('max_over_subscription_ratio')),
'reserved_percentage': (
self.config.safe_get('reserved_percentage')),
'location_info': ('HPE3PARDriver:%(sys_id)s:%(dest_cpg)s' %
{'sys_id': info['serialNumber'],
'dest_cpg': cpg_name}),
'total_volumes': total_volumes,
'capacity_utilization': capacity_utilization,
THROUGHPUT: stat_capabilities[THROUGHPUT],
BANDWIDTH: stat_capabilities[BANDWIDTH],
LATENCY: stat_capabilities[LATENCY],
IO_SIZE: stat_capabilities[IO_SIZE],
QUEUE_LENGTH: stat_capabilities[QUEUE_LENGTH],
AVG_BUSY_PERC: stat_capabilities[AVG_BUSY_PERC],
'filter_function': filter_function,
'goodness_function': goodness_function,
'multiattach': True,
'consistencygroup_support': True,
}
if remotecopy_support:
pool['replication_enabled'] = self._replication_enabled
pool['replication_type'] = ['sync', 'periodic']
pool['replication_count'] = len(self._replication_targets)
pools.append(pool)
self.stats = {'driver_version': '3.0',
'storage_protocol': None,
'vendor_name': 'Hewlett Packard Enterprise',
'volume_backend_name': None,
'array_id': info['id'],
'replication_enabled': self._replication_enabled,
'replication_targets': self._get_replication_targets(),
'pools': pools}
def _check_license_enabled(self, valid_licenses,
license_to_check, capability):
"""Check a license against valid licenses on the array."""
if valid_licenses:
for license in valid_licenses:
if license_to_check in license.get('name'):
return True
LOG.debug(("'%(capability)s' requires a '%(license)s' "
"license which is not installed.") %
{'capability': capability,
'license': license_to_check})
return False
def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None):
"""find a VLUN on a 3PAR host."""
vluns = self.client.getHostVLUNs(hostname)
found_vlun = None
for vlun in vluns:
if volume_name in vlun['volumeName']:
if lun_id is not None:
if vlun['lun'] == lun_id:
if nsp:
port = self.build_portPos(nsp)
if vlun['portPos'] == port:
found_vlun = vlun
break
else:
found_vlun = vlun
break
else:
found_vlun = vlun
break
if found_vlun is None:
LOG.info(_LI("3PAR vlun %(name)s not found on host %(host)s"),
{'name': volume_name, 'host': hostname})
return found_vlun
def create_vlun(self, volume, host, nsp=None, lun_id=None):
"""Create a VLUN.
In order to export a volume on a 3PAR box, we have to create a VLUN.
"""
volume_name = self._get_3par_vol_name(volume['id'])
vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp,
lun_id=lun_id)
return self._get_vlun(volume_name,
host['name'],
vlun_info['lun_id'],
nsp)
def delete_vlun(self, volume, hostname):
volume_name = self._get_3par_vol_name(volume['id'])
vluns = self.client.getHostVLUNs(hostname)
        # When deleting VLUNs, you simply need to remove the template VLUN
        # and any active VLUNs will be automatically removed. The template
        # VLUNs are marked as active: False
volume_vluns = []
for vlun in vluns:
if volume_name in vlun['volumeName']:
# template VLUNs are 'active' = False
if not vlun['active']:
volume_vluns.append(vlun)
if not volume_vluns:
            LOG.warning(_LW("3PAR vlun for volume %(name)s not found on "
                            "host %(host)s"),
                        {'name': volume_name, 'host': hostname})
return
# VLUN Type of MATCHED_SET 4 requires the port to be provided
for vlun in volume_vluns:
if 'portPos' in vlun:
self.client.deleteVLUN(volume_name, vlun['lun'],
hostname=hostname,
port=vlun['portPos'])
else:
self.client.deleteVLUN(volume_name, vlun['lun'],
hostname=hostname)
# Determine if there are other volumes attached to the host.
# This will determine whether we should try removing host from host set
# and deleting the host.
vluns = []
try:
vluns = self.client.getHostVLUNs(hostname)
except hpeexceptions.HTTPNotFound:
LOG.debug("All VLUNs removed from host %s", hostname)
pass
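        # Note: this relies on Python's for/else; the else branch runs only
        # when the loop found no vlun belonging to another volume (no break).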
for vlun in vluns:
if volume_name not in vlun['volumeName']:
# Found another volume
break
else:
# We deleted the last vlun, so try to delete the host too.
# This check avoids the old unnecessary try/fail when vluns exist
# but adds a minor race condition if a vlun is manually deleted
# externally at precisely the wrong time. Worst case is leftover
# host, so it is worth the unlikely risk.
try:
self._delete_3par_host(hostname)
except Exception as ex:
# Any exception down here is only logged. The vlun is deleted.
# If the host is in a host set, the delete host will fail and
# the host will remain in the host set. This is desired
# because cinder was not responsible for the host set
# assignment. The host set could be used outside of cinder
# for future needs (e.g. export volume to host set).
# The log info explains why the host was left alone.
LOG.info(_LI("3PAR vlun for volume '%(name)s' was deleted, "
"but the host '%(host)s' was not deleted "
"because: %(reason)s"),
{'name': volume_name, 'host': hostname,
'reason': ex.get_description()})
def _get_volume_type(self, type_id):
ctxt = context.get_admin_context()
return volume_types.get_volume_type(ctxt, type_id)
def _get_key_value(self, hpe3par_keys, key, default=None):
if hpe3par_keys is not None and key in hpe3par_keys:
return hpe3par_keys[key]
else:
return default
def _get_qos_value(self, qos, key, default=None):
if key in qos:
return qos[key]
else:
return default
def _get_qos_by_volume_type(self, volume_type):
qos = {}
qos_specs_id = volume_type.get('qos_specs_id')
specs = volume_type.get('extra_specs')
# NOTE(kmartin): We prefer the qos_specs association
# and override any existing extra-specs settings
# if present.
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(context.get_admin_context(),
qos_specs_id)['specs']
else:
kvs = specs
for key, value in kvs.items():
if 'qos:' in key:
fields = key.split(':')
key = fields[1]
if key in self.hpe_qos_keys:
qos[key] = value
return qos
def _get_keys_by_volume_type(self, volume_type):
hpe3par_keys = {}
specs = volume_type.get('extra_specs')
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.hpe3par_valid_keys:
hpe3par_keys[key] = value
return hpe3par_keys
def _set_qos_rule(self, qos, vvs_name):
min_io = self._get_qos_value(qos, 'minIOPS')
max_io = self._get_qos_value(qos, 'maxIOPS')
min_bw = self._get_qos_value(qos, 'minBWS')
max_bw = self._get_qos_value(qos, 'maxBWS')
latency = self._get_qos_value(qos, 'latency')
priority = self._get_qos_value(qos, 'priority', 'normal')
qosRule = {}
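        # When only one of a min/max pair is supplied, the other side of the
        # rule is mirrored from it below, so the 3PAR QoS rule always carries
        # both a goal and a limit for IOPS and bandwidth.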
if min_io:
qosRule['ioMinGoal'] = int(min_io)
if max_io is None:
qosRule['ioMaxLimit'] = int(min_io)
if max_io:
qosRule['ioMaxLimit'] = int(max_io)
if min_io is None:
qosRule['ioMinGoal'] = int(max_io)
if min_bw:
qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki
if max_bw is None:
qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki
if max_bw:
qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki
if min_bw is None:
qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki
if latency:
qosRule['latencyGoal'] = int(latency)
if priority:
qosRule['priority'] = self.qos_priority_level.get(priority.lower())
try:
self.client.createQoSRules(vvs_name, qosRule)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating QOS rule %s"), qosRule)
def get_flash_cache_policy(self, hpe3par_keys):
if hpe3par_keys is not None:
# First check list of extra spec keys
val = self._get_key_value(hpe3par_keys, 'flash_cache', None)
if val is not None:
# If requested, see if supported on back end
if self.API_VERSION < FLASH_CACHE_API_VERSION:
err = (_("Flash Cache Policy requires "
"WSAPI version '%(fcache_version)s' "
"version '%(version)s' is installed.") %
{'fcache_version': FLASH_CACHE_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
else:
if val.lower() == 'true':
return self.client.FLASH_CACHE_ENABLED
else:
return self.client.FLASH_CACHE_DISABLED
return None
def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name):
# Update virtual volume set
if flash_cache:
try:
self.client.modifyVolumeSet(vvs_name,
flashCachePolicy=flash_cache)
LOG.info(_LI("Flash Cache policy set to %s"), flash_cache)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error setting Flash Cache policy "
"to %s - exception"), flash_cache)
def _add_volume_to_volume_set(self, volume, volume_name,
cpg, vvs_name, qos, flash_cache):
if vvs_name is not None:
# Admin has set a volume set name to add the volume to
try:
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
except hpeexceptions.HTTPNotFound:
msg = _('VV Set %s does not exist.') % vvs_name
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
else:
vvs_name = self._get_3par_vvs_name(volume['id'])
domain = self.get_domain(cpg)
self.client.createVolumeSet(vvs_name, domain)
try:
self._set_qos_rule(qos, vvs_name)
self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name)
self.client.addVolumeToVolumeSet(vvs_name, volume_name)
except Exception as ex:
# Cleanup the volume set if unable to create the qos rule
# or flash cache policy or add the volume to the volume set
self.client.deleteVolumeSet(vvs_name)
raise exception.CinderException(ex)
def get_cpg(self, volume, allowSnap=False):
volume_name = self._get_3par_vol_name(volume['id'])
vol = self.client.getVolume(volume_name)
        # Search for 'userCPG' in the get volume REST API response;
        # if found, return userCPG, else search for the snapCPG attribute
        # when allowSnap=True. For the cases where the 3PAR REST call for
        # get volume doesn't have either userCPG or snapCPG,
        # take the default cpg from the 'host' attribute of the volume param.
LOG.debug("get volume response is: %s", vol)
if 'userCPG' in vol:
return vol['userCPG']
elif allowSnap and 'snapCPG' in vol:
return vol['snapCPG']
else:
return volume_utils.extract_host(volume['host'], 'pool')
def _get_3par_vol_comment(self, volume_name):
vol = self.client.getVolume(volume_name)
if 'comment' in vol:
return vol['comment']
return None
def validate_persona(self, persona_value):
"""Validate persona value.
If the passed in persona_value is not valid, raise InvalidInput,
otherwise return the persona ID.
:param persona_value:
:raises: exception.InvalidInput
:returns: persona ID
"""
if persona_value not in self.valid_persona_values:
err = (_("Must specify a valid persona %(valid)s,"
"value '%(persona)s' is invalid.") %
{'valid': self.valid_persona_values,
'persona': persona_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
# persona is set by the id so remove the text and return the id
# i.e for persona '1 - Generic' returns 1
persona_id = persona_value.split(' ')
return persona_id[0]
def get_persona_type(self, volume, hpe3par_keys=None):
default_persona = self.valid_persona_values[0]
type_id = volume.get('volume_type_id', None)
if type_id is not None:
volume_type = self._get_volume_type(type_id)
if hpe3par_keys is None:
hpe3par_keys = self._get_keys_by_volume_type(volume_type)
persona_value = self._get_key_value(hpe3par_keys, 'persona',
default_persona)
return self.validate_persona(persona_value)
def get_type_info(self, type_id):
"""Get 3PAR type info for the given type_id.
Reconciles VV Set, old-style extra-specs, and QOS specs
and returns commonly used info about the type.
:returns: hpe3par_keys, qos, volume_type, vvs_name
"""
volume_type = None
vvs_name = None
hpe3par_keys = {}
qos = {}
if type_id is not None:
volume_type = self._get_volume_type(type_id)
hpe3par_keys = self._get_keys_by_volume_type(volume_type)
vvs_name = self._get_key_value(hpe3par_keys, 'vvs')
if vvs_name is None:
qos = self._get_qos_by_volume_type(volume_type)
return hpe3par_keys, qos, volume_type, vvs_name
def get_volume_settings_from_type_id(self, type_id, pool):
"""Get 3PAR volume settings given a type_id.
Combines type info and config settings to return a dictionary
describing the 3PAR volume settings. Does some validation (CPG).
Uses pool as the default cpg (when not specified in volume type specs).
:param type_id: id of type to get settings for
:param pool: CPG to use if type does not have one set
:returns: dict
"""
hpe3par_keys, qos, volume_type, vvs_name = self.get_type_info(type_id)
# Default to pool extracted from host.
# If that doesn't work use the 1st CPG in the config as the default.
default_cpg = pool or self._client_conf['hpe3par_cpg'][0]
cpg = self._get_key_value(hpe3par_keys, 'cpg', default_cpg)
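        # Note: the identity comparison below is intentional; _get_key_value
        # returns the default_cpg object itself when no 'cpg' extra spec is
        # present, so 'is not' detects a cpg explicitly set in the type.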
if cpg is not default_cpg:
# The cpg was specified in a volume type extra spec so it
# needs to be validated that it's in the correct domain.
# log warning here
msg = _LW("'hpe3par:cpg' is not supported as an extra spec "
"in a volume type. CPG's are chosen by "
"the cinder scheduler, as a pool, from the "
"cinder.conf entry 'hpe3par_cpg', which can "
"be a list of CPGs.")
versionutils.report_deprecated_feature(LOG, msg)
LOG.info(_LI("Using pool %(pool)s instead of %(cpg)s"),
{'pool': pool, 'cpg': cpg})
cpg = pool
self.validate_cpg(cpg)
# Look to see if the snap_cpg was specified in volume type
# extra spec, if not use hpe3par_cpg_snap from config as the
# default.
snap_cpg = self.config.hpe3par_cpg_snap
snap_cpg = self._get_key_value(hpe3par_keys, 'snap_cpg', snap_cpg)
# If it's still not set or empty then set it to the cpg.
if not snap_cpg:
snap_cpg = cpg
# if provisioning is not set use thin
default_prov = self.valid_prov_values[0]
prov_value = self._get_key_value(hpe3par_keys, 'provisioning',
default_prov)
# check for valid provisioning type
if prov_value not in self.valid_prov_values:
err = (_("Must specify a valid provisioning type %(valid)s, "
"value '%(prov)s' is invalid.") %
{'valid': self.valid_prov_values,
'prov': prov_value})
LOG.error(err)
raise exception.InvalidInput(reason=err)
tpvv = True
tdvv = False
if prov_value == "full":
tpvv = False
elif prov_value == "dedup":
tpvv = False
tdvv = True
if tdvv and (self.API_VERSION < DEDUP_API_VERSION):
err = (_("Dedup is a valid provisioning type, "
"but requires WSAPI version '%(dedup_version)s' "
"version '%(version)s' is installed.") %
{'dedup_version': DEDUP_API_VERSION,
'version': self.API_VERSION})
LOG.error(err)
raise exception.InvalidInput(reason=err)
return {'hpe3par_keys': hpe3par_keys,
'cpg': cpg, 'snap_cpg': snap_cpg,
'vvs_name': vvs_name, 'qos': qos,
'tpvv': tpvv, 'tdvv': tdvv, 'volume_type': volume_type}
def get_volume_settings_from_type(self, volume, host=None):
"""Get 3PAR volume settings given a volume.
Combines type info and config settings to return a dictionary
describing the 3PAR volume settings. Does some validation (CPG and
persona).
:param volume:
:param host: Optional host to use for default pool.
:returns: dict
"""
type_id = volume.get('volume_type_id', None)
pool = None
if host:
pool = volume_utils.extract_host(host['host'], 'pool')
else:
pool = volume_utils.extract_host(volume['host'], 'pool')
volume_settings = self.get_volume_settings_from_type_id(type_id, pool)
# check for valid persona even if we don't use it until
# attach time, this will give the end user notice that the
# persona type is invalid at volume creation time
self.get_persona_type(volume, volume_settings['hpe3par_keys'])
return volume_settings
def create_volume(self, volume):
LOG.debug('CREATE VOLUME (%(disp_name)s: %(vol_name)s %(id)s on '
'%(host)s)',
{'disp_name': volume['display_name'],
'vol_name': volume['name'],
'id': self._get_3par_vol_name(volume['id']),
'host': volume['host']})
try:
comments = {'volume_id': volume['id'],
'name': volume['name'],
'type': 'OpenStack'}
name = volume.get('display_name', None)
if name:
comments['display_name'] = name
# get the options supported by volume types
type_info = self.get_volume_settings_from_type(volume)
volume_type = type_info['volume_type']
vvs_name = type_info['vvs_name']
qos = type_info['qos']
cpg = type_info['cpg']
snap_cpg = type_info['snap_cpg']
tpvv = type_info['tpvv']
tdvv = type_info['tdvv']
flash_cache = self.get_flash_cache_policy(
type_info['hpe3par_keys'])
cg_id = volume.get('consistencygroup_id', None)
if cg_id:
vvs_name = self._get_3par_vvs_name(cg_id)
type_id = volume.get('volume_type_id', None)
if type_id is not None:
comments['volume_type_name'] = volume_type.get('name')
comments['volume_type_id'] = type_id
if vvs_name is not None:
comments['vvs'] = vvs_name
else:
comments['qos'] = qos
extras = {'comment': json.dumps(comments),
'snapCPG': snap_cpg,
'tpvv': tpvv}
# Only set the dedup option if the backend supports it.
if self.API_VERSION >= DEDUP_API_VERSION:
extras['tdvv'] = tdvv
capacity = self._capacity_from_size(volume['size'])
volume_name = self._get_3par_vol_name(volume['id'])
self.client.createVolume(volume_name, cpg, capacity, extras)
if qos or vvs_name or flash_cache is not None:
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg, vvs_name, qos,
flash_cache)
except exception.InvalidInput as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
# v2 replication check
replication_flag = False
if self._volume_of_replicated_type(volume) and (
self._do_volume_replication_setup(volume)):
replication_flag = True
except hpeexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpeexceptions.HTTPBadRequest as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except exception.CinderException as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg,
replication=replication_flag,
provider_location=self.client.id)
def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None,
tpvv=True, tdvv=False):
# Virtual volume sets are not supported with the -online option
LOG.debug('Creating clone of a volume %(src)s to %(dest)s.',
{'src': src_name, 'dest': dest_name})
optional = {'tpvv': tpvv, 'online': True}
if snap_cpg is not None:
optional['snapCPG'] = snap_cpg
if self.API_VERSION >= DEDUP_API_VERSION:
optional['tdvv'] = tdvv
body = self.client.copyVolume(src_name, dest_name, cpg, optional)
return body['taskid']
def get_next_word(self, s, search_string):
"""Return the next word.
        Search 's' for 'search_string', if found return the word following
        'search_string' in 's'.
"""
word = re.search(search_string.strip(' ') + ' ([^ ]*)', s)
return word.groups()[0].strip(' ')
def _get_3par_vol_comment_value(self, vol_comment, key):
comment_dict = dict(ast.literal_eval(vol_comment))
if key in comment_dict:
return comment_dict[key]
return None
def _get_model_update(self, volume_host, cpg, replication=False,
provider_location=None):
"""Get model_update dict to use when we select a pool.
The pools implementation uses a volume['host'] suffix of :poolname.
When the volume comes in with this selected pool, we sometimes use
a different pool (e.g. because the type says to use a different pool).
So in the several places that we do this, we need to return a model
update so that the volume will have the actual pool name in the host
suffix after the operation.
Given a volume_host, which should (might) have the pool suffix, and
given the CPG we actually chose to use, return a dict to use for a
model update iff an update is needed.
:param volume_host: The volume's host string.
:param cpg: The actual pool (cpg) used, for example from the type.
:returns: dict Model update if we need to update volume host, else None
"""
model_update = {}
host = volume_utils.extract_host(volume_host, 'backend')
host_and_pool = volume_utils.append_host(host, cpg)
if volume_host != host_and_pool:
# Since we selected a pool based on type, update the model.
model_update['host'] = host_and_pool
if replication:
model_update['replication_status'] = 'enabled'
if replication and provider_location:
model_update['provider_location'] = provider_location
if not model_update:
model_update = None
return model_update
def _create_temp_snapshot(self, volume):
"""This creates a temporary snapshot of a volume.
This is used by cloning a volume so that we can then
issue extend volume against the original volume.
"""
vol_name = self._get_3par_vol_name(volume['id'])
# create a brand new uuid for the temp snap
snap_uuid = uuid.uuid4().hex
# this will be named tss-%s
snap_name = self._get_3par_snap_name(snap_uuid, temp_snap=True)
extra = {'volume_name': volume['name'],
'volume_id': volume['id']}
optional = {'comment': json.dumps(extra)}
# let the snapshot die in an hour
optional['expirationHours'] = 1
LOG.info(_LI("Creating temp snapshot %(snap)s from volume %(vol)s"),
{'snap': snap_name, 'vol': vol_name})
self.client.createSnapshot(snap_name, vol_name, optional)
return self.client.getVolume(snap_name)
def create_cloned_volume(self, volume, src_vref):
try:
vol_name = self._get_3par_vol_name(volume['id'])
src_vol_name = self._get_3par_vol_name(src_vref['id'])
back_up_process = False
vol_chap_enabled = False
# Check whether a volume is ISCSI and CHAP enabled on it.
if self._client_conf['hpe3par_iscsi_chap_enabled']:
try:
vol_chap_enabled = self.client.getVolumeMetaData(
src_vol_name, 'HPQ-cinder-CHAP-name')['value']
except hpeexceptions.HTTPNotFound:
LOG.debug("CHAP is not enabled on volume %(vol)s ",
{'vol': src_vref['id']})
vol_chap_enabled = False
# Check whether a process is a backup
if str(src_vref['status']) == 'backing-up':
back_up_process = True
            # If the sizes of the 2 volumes are the same, and this is not a
            # backup process for an iSCSI volume with CHAP enabled on it,
            # we can do an online copy, which is a background process
            # on the 3PAR that makes the volume instantly available.
            # We can't resize a volume while it's being copied.
if volume['size'] == src_vref['size'] and not (
back_up_process and vol_chap_enabled):
LOG.debug("Creating a clone of volume, using online copy.")
type_info = self.get_volume_settings_from_type(volume)
cpg = type_info['cpg']
# make the 3PAR copy the contents.
# can't delete the original until the copy is done.
self._copy_volume(src_vol_name, vol_name, cpg=cpg,
snap_cpg=type_info['snap_cpg'],
tpvv=type_info['tpvv'],
tdvv=type_info['tdvv'])
# v2 replication check
replication_flag = False
if self._volume_of_replicated_type(volume) and (
self._do_volume_replication_setup(volume)):
replication_flag = True
return self._get_model_update(volume['host'], cpg,
replication=replication_flag,
provider_location=self.client.id)
else:
# The size of the new volume is different, so we have to
# copy the volume and wait. Do the resize after the copy
# is complete.
LOG.debug("Creating a clone of volume, using non-online copy.")
# we first have to create the destination volume
model_update = self.create_volume(volume)
optional = {'priority': 1}
body = self.client.copyVolume(src_vol_name, vol_name, None,
optional=optional)
task_id = body['taskid']
task_status = self._wait_for_task_completion(task_id)
if task_status['status'] is not self.client.TASK_DONE:
dbg = {'status': task_status, 'id': volume['id']}
msg = _('Copy volume task failed: create_cloned_volume '
'id=%(id)s, status=%(status)s.') % dbg
raise exception.CinderException(msg)
else:
LOG.debug('Copy volume completed: create_cloned_volume: '
'id=%s.', volume['id'])
return model_update
except hpeexceptions.HTTPForbidden:
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound:
raise exception.NotFound()
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
def delete_volume(self, volume):
# v2 replication check
# If the volume type is replication enabled, we want to call our own
# method of deconstructing the volume and its dependencies
if self._volume_of_replicated_type(volume):
replication_status = volume.get('replication_status', None)
if replication_status and replication_status == "failed-over":
self._delete_replicated_failed_over_volume(volume)
else:
self._do_volume_replication_destroy(volume)
return
try:
volume_name = self._get_3par_vol_name(volume['id'])
# Try and delete the volume, it might fail here because
# the volume is part of a volume set which will have the
# volume set name in the error.
try:
self.client.deleteVolume(volume_name)
except hpeexceptions.HTTPBadRequest as ex:
if ex.get_code() == 29:
if self.client.isOnlinePhysicalCopy(volume_name):
LOG.debug("Found an online copy for %(volume)s",
{'volume': volume_name})
# the volume is in process of being cloned.
# stopOnlinePhysicalCopy will also delete
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
LOG.error(_LE("Exception: %s"), ex)
raise
else:
LOG.error(_LE("Exception: %s"), ex)
raise
except hpeexceptions.HTTPConflict as ex:
if ex.get_code() == 34:
# This is a special case which means the
# volume is part of a volume set.
vvset_name = self.client.findVolumeSet(volume_name)
LOG.debug("Returned vvset_name = %s", vvset_name)
                if (vvset_name is not None and
                        vvset_name.startswith('vvs-')):
# We have a single volume per volume set, so
# remove the volume set.
self.client.deleteVolumeSet(
self._get_3par_vvs_name(volume['id']))
elif vvset_name is not None:
# We have a pre-defined volume set just remove the
# volume and leave the volume set.
self.client.removeVolumeFromVolumeSet(vvset_name,
volume_name)
self.client.deleteVolume(volume_name)
elif ex.get_code() == 151:
if self.client.isOnlinePhysicalCopy(volume_name):
LOG.debug("Found an online copy for %(volume)s",
{'volume': volume_name})
# the volume is in process of being cloned.
# stopOnlinePhysicalCopy will also delete
# the volume once it stops the copy.
self.client.stopOnlinePhysicalCopy(volume_name)
else:
# the volume is being operated on in a background
# task on the 3PAR.
# TODO(walter-boring) do a retry a few times.
# for now lets log a better message
msg = _("The volume is currently busy on the 3PAR"
" and cannot be deleted at this time. "
"You can try again later.")
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
elif (ex.get_code() == 32):
# Error 32 means that the volume has children
# see if we have any temp snapshots
snaps = self.client.getVolumeSnapshots(volume_name)
for snap in snaps:
if snap.startswith('tss-'):
# looks like we found a temp snapshot.
LOG.info(
_LI("Found a temporary snapshot %(name)s"),
{'name': snap})
try:
self.client.deleteVolume(snap)
except hpeexceptions.HTTPNotFound:
# if the volume is gone, it's as good as a
# successful delete
pass
except Exception:
msg = _("Volume has a temporary snapshot that "
"can't be deleted at this time.")
raise exception.VolumeIsBusy(message=msg)
try:
self.delete_volume(volume)
except Exception:
msg = _("Volume has children and cannot be deleted!")
raise exception.VolumeIsBusy(message=msg)
else:
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except hpeexceptions.HTTPNotFound as ex:
# We'll let this act as if it worked
# it helps clean up the cinder entries.
LOG.warning(_LW("Delete volume id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s"),
{'id': volume['id'], 'msg': ex})
except hpeexceptions.HTTPForbidden as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized(ex.get_description())
except hpeexceptions.HTTPConflict as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.VolumeIsBusy(message=ex.get_description())
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
def create_volume_from_snapshot(self, volume, snapshot, snap_name=None,
vvs_name=None):
"""Creates a volume from a snapshot."""
LOG.debug("Create Volume from Snapshot\n%(vol_name)s\n%(ss_name)s",
{'vol_name': pprint.pformat(volume['display_name']),
'ss_name': pprint.pformat(snapshot['display_name'])})
model_update = {}
if volume['size'] < snapshot['volume_size']:
err = ("You cannot reduce size of the volume. It must "
"be greater than or equal to the snapshot.")
LOG.error(err)
raise exception.InvalidInput(reason=err)
try:
if not snap_name:
snap_name = self._get_3par_snap_name(snapshot['id'])
volume_name = self._get_3par_vol_name(volume['id'])
extra = {'volume_id': volume['id'],
'snapshot_id': snapshot['id']}
type_id = volume.get('volume_type_id', None)
hpe3par_keys, qos, _volume_type, vvs = self.get_type_info(
type_id)
if vvs:
vvs_name = vvs
name = volume.get('display_name', None)
if name:
extra['display_name'] = name
description = volume.get('display_description', None)
if description:
extra['description'] = description
optional = {'comment': json.dumps(extra),
'readOnly': False}
self.client.createSnapshot(volume_name, snap_name, optional)
# Convert snapshot volume to base volume type
LOG.debug('Converting to base volume type: %s.',
volume['id'])
model_update = self._convert_to_base_volume(volume)
# Grow the snapshot if needed
growth_size = volume['size'] - snapshot['volume_size']
if growth_size > 0:
try:
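                    # convert the GiB difference to MiB
                    # (units.Gi / units.Mi == 1024) for growVolume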
growth_size_mib = growth_size * units.Gi / units.Mi
LOG.debug('Growing volume: %(id)s by %(size)s GiB.',
{'id': volume['id'], 'size': growth_size})
self.client.growVolume(volume_name, growth_size_mib)
except Exception as ex:
LOG.error(_LE("Error extending volume %(id)s. "
"Ex: %(ex)s"),
{'id': volume['id'], 'ex': ex})
# Delete the volume if unable to grow it
self.client.deleteVolume(volume_name)
raise exception.CinderException(ex)
# Check for flash cache setting in extra specs
flash_cache = self.get_flash_cache_policy(hpe3par_keys)
if qos or vvs_name or flash_cache is not None:
cpg_names = self._get_key_value(
hpe3par_keys, 'cpg', self._client_conf['hpe3par_cpg'])
try:
self._add_volume_to_volume_set(volume, volume_name,
cpg_names[0], vvs_name,
qos, flash_cache)
except Exception as ex:
# Delete the volume if unable to add it to the volume set
self.client.deleteVolume(volume_name)
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
# v2 replication check
if self._volume_of_replicated_type(volume) and (
self._do_volume_replication_setup(volume)):
model_update['replication_status'] = 'enabled'
model_update['provider_location'] = self.client.id
except hpeexceptions.HTTPForbidden as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotFound()
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return model_update
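    # create_snapshot() below honors the optional hpe3par_snapshot_expiration
    # and hpe3par_snapshot_retention config values (in hours) when building
    # the createSnapshot() request.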
def create_snapshot(self, snapshot):
LOG.debug("Create Snapshot\n%s", pprint.pformat(snapshot))
try:
snap_name = self._get_3par_snap_name(snapshot['id'])
vol_name = self._get_3par_vol_name(snapshot['volume_id'])
extra = {'volume_name': snapshot['volume_name']}
vol_id = snapshot.get('volume_id', None)
if vol_id:
extra['volume_id'] = vol_id
try:
extra['display_name'] = snapshot['display_name']
except AttributeError:
pass
try:
extra['description'] = snapshot['display_description']
except AttributeError:
pass
optional = {'comment': json.dumps(extra),
'readOnly': True}
if self.config.hpe3par_snapshot_expiration:
optional['expirationHours'] = (
int(self.config.hpe3par_snapshot_expiration))
if self.config.hpe3par_snapshot_retention:
optional['retentionHours'] = (
int(self.config.hpe3par_snapshot_retention))
self.client.createSnapshot(snap_name, vol_name, optional)
except hpeexceptions.HTTPForbidden as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotFound()
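    # migrate_volume() delegates to retype(); _retype_pre_checks() rejects
    # hosts on a different 3PAR array, so only migrations within the same
    # backend are handled directly.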
def migrate_volume(self, volume, host):
"""Migrate directly if source and dest are managed by same storage.
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
:returns: (False, None) if the driver does not support migration,
(True, model_update) if successful
"""
dbg = {'id': volume['id'],
'host': host['host'],
'status': volume['status']}
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, '
'status=%(status)s.', dbg)
ret = False, None
if volume['status'] in ['available', 'in-use']:
volume_type = None
if volume['volume_type_id']:
volume_type = self._get_volume_type(volume['volume_type_id'])
try:
ret = self.retype(volume, volume_type, None, host)
except Exception as e:
LOG.info(_LI('3PAR driver cannot perform migration. '
'Retype exception: %s'), e)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s, '
'status=%(status)s.', dbg)
dbg_ret = {'supported': ret[0], 'model_update': ret[1]}
LOG.debug('migrate_volume result: %(supported)s, %(model_update)s',
dbg_ret)
return ret
def update_migrated_volume(self, context, volume, new_volume,
original_volume_status):
"""Rename the new (temp) volume to it's original name.
This method tries to rename the new volume to it's original
name after the migration has completed.
"""
LOG.debug("Update volume name for %(id)s", {'id': new_volume['id']})
name_id = None
provider_location = None
if original_volume_status == 'available':
# volume isn't attached and can be updated
original_name = self._get_3par_vol_name(volume['id'])
current_name = self._get_3par_vol_name(new_volume['id'])
try:
volumeMods = {'newName': original_name}
self.client.modifyVolume(current_name, volumeMods)
LOG.info(_LI("Volume name changed from %(tmp)s to %(orig)s"),
{'tmp': current_name, 'orig': original_name})
except Exception as e:
LOG.error(_LE("Changing the volume name from %(tmp)s to "
"%(orig)s failed because %(reason)s"),
{'tmp': current_name, 'orig': original_name,
'reason': e})
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
else:
# the backend can't change the name.
name_id = new_volume['_name_id'] or new_volume['id']
provider_location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': provider_location}
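    # _wait_for_task_completion() polls getTask() once per second until the
    # task leaves the TASK_ACTIVE state. Illustrative (hypothetical) use:
    #     status = self._wait_for_task_completion(task_id)
    #     if status['status'] != self.client.TASK_DONE:
    #         ...  # treat as a failure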
def _wait_for_task_completion(self, task_id):
"""This waits for a 3PAR background task complete or fail.
This looks for a task to get out of the 'active' state.
"""
# Wait for the physical copy task to complete
def _wait_for_task(task_id):
status = self.client.getTask(task_id)
LOG.debug("3PAR Task id %(id)s status = %(status)s",
{'id': task_id,
'status': status['status']})
            if status['status'] != self.client.TASK_ACTIVE:
self._task_status = status
raise loopingcall.LoopingCallDone()
self._task_status = None
timer = loopingcall.FixedIntervalLoopingCall(
_wait_for_task, task_id)
timer.start(interval=1).wait()
return self._task_status
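    # _convert_to_base_volume() makes a physical copy of the volume under a
    # temporary "omv-" name, waits for the copy task, deletes the original
    # "osv-" volume and renames the copy back to the original name.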
def _convert_to_base_volume(self, volume, new_cpg=None):
try:
type_info = self.get_volume_settings_from_type(volume)
if new_cpg:
cpg = new_cpg
else:
cpg = type_info['cpg']
# Change the name such that it is unique since 3PAR
# names must be unique across all CPGs
volume_name = self._get_3par_vol_name(volume['id'])
temp_vol_name = volume_name.replace("osv-", "omv-")
# Create a physical copy of the volume
task_id = self._copy_volume(volume_name, temp_vol_name,
cpg, cpg, type_info['tpvv'],
type_info['tdvv'])
LOG.debug('Copy volume scheduled: convert_to_base_volume: '
'id=%s.', volume['id'])
task_status = self._wait_for_task_completion(task_id)
            if task_status['status'] != self.client.TASK_DONE:
dbg = {'status': task_status, 'id': volume['id']}
msg = _('Copy volume task failed: convert_to_base_volume: '
'id=%(id)s, status=%(status)s.') % dbg
raise exception.CinderException(msg)
else:
LOG.debug('Copy volume completed: convert_to_base_volume: '
'id=%s.', volume['id'])
comment = self._get_3par_vol_comment(volume_name)
if comment:
self.client.modifyVolume(temp_vol_name, {'comment': comment})
LOG.debug('Volume rename completed: convert_to_base_volume: '
'id=%s.', volume['id'])
# Delete source volume after the copy is complete
self.client.deleteVolume(volume_name)
LOG.debug('Delete src volume completed: convert_to_base_volume: '
'id=%s.', volume['id'])
# Rename the new volume to the original name
self.client.modifyVolume(temp_vol_name, {'newName': volume_name})
LOG.info(_LI('Completed: convert_to_base_volume: '
'id=%s.'), volume['id'])
except hpeexceptions.HTTPConflict:
msg = _("Volume (%s) already exists on array.") % volume_name
LOG.error(msg)
raise exception.Duplicate(msg)
except hpeexceptions.HTTPBadRequest as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.Invalid(ex.get_description())
except exception.InvalidInput as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except exception.CinderException as ex:
LOG.error(_LE("Exception: %s"), ex)
raise
except Exception as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.CinderException(ex)
return self._get_model_update(volume['host'], cpg)
def delete_snapshot(self, snapshot):
LOG.debug("Delete Snapshot id %(id)s %(name)s",
{'id': snapshot['id'], 'name': pprint.pformat(snapshot)})
try:
snap_name = self._get_3par_snap_name(snapshot['id'])
self.client.deleteVolume(snap_name)
except hpeexceptions.HTTPForbidden as ex:
LOG.error(_LE("Exception: %s"), ex)
raise exception.NotAuthorized()
except hpeexceptions.HTTPNotFound as ex:
            # We'll let this act as if it worked;
            # it helps clean up the cinder entries.
LOG.warning(_LW("Delete Snapshot id not found. Removing from "
"cinder: %(id)s Ex: %(msg)s"),
{'id': snapshot['id'], 'msg': ex})
except hpeexceptions.HTTPConflict as ex:
            if ex.get_code() == 32:
# Error 32 means that the snapshot has children
# see if we have any temp snapshots
snaps = self.client.getVolumeSnapshots(snap_name)
for snap in snaps:
if snap.startswith('tss-'):
LOG.info(
_LI("Found a temporary snapshot %(name)s"),
{'name': snap})
try:
self.client.deleteVolume(snap)
except hpeexceptions.HTTPNotFound:
# if the volume is gone, it's as good as a
# successful delete
pass
except Exception:
msg = _("Snapshot has a temporary snapshot that "
"can't be deleted at this time.")
raise exception.SnapshotIsBusy(message=msg)
try:
self.client.deleteVolume(snap_name)
except Exception:
msg = _("Snapshot has children and cannot be deleted!")
raise exception.SnapshotIsBusy(message=msg)
else:
LOG.error(_LE("Exception: %s"), ex)
raise exception.SnapshotIsBusy(message=ex.get_description())
def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns):
if wwns is not None and not isinstance(wwns, list):
wwns = [wwns]
if iqns is not None and not isinstance(iqns, list):
iqns = [iqns]
out = self.client.getHosts()
hosts = out['members']
for host in hosts:
if 'iSCSIPaths' in host and iqns is not None:
iscsi_paths = host['iSCSIPaths']
for iscsi in iscsi_paths:
for iqn in iqns:
if iqn == iscsi['name']:
return host['name']
if 'FCPaths' in host and wwns is not None:
fc_paths = host['FCPaths']
for fc in fc_paths:
for wwn in wwns:
if wwn.upper() == fc['wwn'].upper():
return host['name']
def terminate_connection(self, volume, hostname, wwn=None, iqn=None):
"""Driver entry point to unattach a volume from an instance."""
# does 3par know this host by a different name?
hosts = None
if wwn:
hosts = self.client.queryHost(wwns=wwn)
elif iqn:
hosts = self.client.queryHost(iqns=[iqn])
if hosts and hosts['members'] and 'name' in hosts['members'][0]:
hostname = hosts['members'][0]['name']
try:
self.delete_vlun(volume, hostname)
return
except hpeexceptions.HTTPNotFound as e:
if 'host does not exist' in e.get_description():
# If a host is failed-over, we want to allow the detach to
# 'succeed' when it cannot find the host. We can simply
# return out of the terminate connection in order for things
# to be updated correctly.
if self._active_backend_id:
LOG.warning(_LW("Because the host is currently in a "
"failed-over state, the volume will not "
"be properly detached from the primary "
"array. The detach will be considered a "
"success as far as Cinder is concerned. "
"The volume can now be attached to the "
"secondary target."))
return
else:
# use the wwn to see if we can find the hostname
hostname = self._get_3par_hostname_from_wwn_iqn(wwn, iqn)
# no 3par host, re-throw
if hostname is None:
LOG.error(_LE("Exception: %s"), e)
raise
else:
# not a 'host does not exist' HTTPNotFound exception, re-throw
LOG.error(_LE("Exception: %s"), e)
raise
# try again with name retrieved from 3par
self.delete_vlun(volume, hostname)
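    # NSP helpers: an NSP string has the form "<node>:<slot>:<cardPort>".
    # Illustrative (hypothetical values):
    #     build_nsp({'node': 1, 'slot': 2, 'cardPort': 3}) -> '1:2:3'
    #     build_portPos('1:2:3') -> {'node': 1, 'slot': 2, 'cardPort': 3}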
def build_nsp(self, portPos):
return '%s:%s:%s' % (portPos['node'],
portPos['slot'],
portPos['cardPort'])
def build_portPos(self, nsp):
split = nsp.split(":")
portPos = {}
portPos['node'] = int(split[0])
portPos['slot'] = int(split[1])
portPos['cardPort'] = int(split[2])
return portPos
def tune_vv(self, old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_cpg, new_cpg, volume_name):
"""Tune the volume to change the userCPG and/or provisioningType.
The volume will be modified/tuned/converted to the new userCPG and
provisioningType, as needed.
TaskWaiter is used to make this function wait until the 3PAR task
is no longer active. When the task is no longer active, then it must
either be done or it is in a state that we need to treat as an error.
"""
if old_tpvv == new_tpvv and old_tdvv == new_tdvv:
if new_cpg != old_cpg:
LOG.info(_LI("Modifying %(volume_name)s userCPG "
"from %(old_cpg)s"
" to %(new_cpg)s"),
{'volume_name': volume_name,
'old_cpg': old_cpg, 'new_cpg': new_cpg})
_response, body = self.client.modifyVolume(
volume_name,
{'action': 6,
'tuneOperation': 1,
'userCPG': new_cpg})
task_id = body['taskid']
status = self.TaskWaiter(self.client, task_id).wait_for_task()
                if status['status'] != self.client.TASK_DONE:
msg = (_('Tune volume task stopped before it was done: '
'volume_name=%(volume_name)s, '
'task-status=%(status)s.') %
{'status': status, 'volume_name': volume_name})
raise exception.VolumeBackendAPIException(msg)
else:
if new_tpvv:
cop = self.CONVERT_TO_THIN
LOG.info(_LI("Converting %(volume_name)s to thin provisioning "
"with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
elif new_tdvv:
cop = self.CONVERT_TO_DEDUP
LOG.info(_LI("Converting %(volume_name)s to thin dedup "
"provisioning with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
else:
cop = self.CONVERT_TO_FULL
LOG.info(_LI("Converting %(volume_name)s to full provisioning "
"with userCPG=%(new_cpg)s"),
{'volume_name': volume_name, 'new_cpg': new_cpg})
try:
response, body = self.client.modifyVolume(
volume_name,
{'action': 6,
'tuneOperation': 1,
'userCPG': new_cpg,
'conversionOperation': cop})
            except hpeexceptions.HTTPBadRequest as ex:
                if ex.get_code() == 40 and "keepVV" in six.text_type(ex):
                    # Cannot retype with snapshots because we don't want to
                    # use keepVV and have straggling volumes. Log additional
                    # info before re-raising.
                    LOG.info(_LI("tunevv failed because the volume '%s' "
                                 "has snapshots."), volume_name)
                # Re-raise unconditionally; otherwise 'body' below would be
                # undefined and a NameError would mask the real failure.
                raise
task_id = body['taskid']
status = self.TaskWaiter(self.client, task_id).wait_for_task()
            if status['status'] != self.client.TASK_DONE:
msg = (_('Tune volume task stopped before it was done: '
'volume_name=%(volume_name)s, '
'task-status=%(status)s.') %
{'status': status, 'volume_name': volume_name})
raise exception.VolumeBackendAPIException(msg)
def _retype_pre_checks(self, volume, host, new_persona,
old_cpg, new_cpg,
new_snap_cpg):
"""Test retype parameters before making retype changes.
Do pre-retype parameter validation. These checks will
raise an exception if we should not attempt this retype.
"""
if new_persona:
self.validate_persona(new_persona)
if host is not None:
(host_type, host_id, _host_cpg) = (
host['capabilities']['location_info']).split(':')
            if host_type != 'HPE3PARDriver':
reason = (_("Cannot retype from HPE3PARDriver to %s.") %
host_type)
raise exception.InvalidHost(reason)
sys_info = self.client.getStorageSystemInfo()
            if host_id != sys_info['serialNumber']:
reason = (_("Cannot retype from one 3PAR array to another."))
raise exception.InvalidHost(reason)
# Validate new_snap_cpg. A white-space snapCPG will fail eventually,
# but we'd prefer to fail fast -- if this ever happens.
if not new_snap_cpg or new_snap_cpg.isspace():
reason = (_("Invalid new snapCPG name for retype. "
"new_snap_cpg='%s'.") % new_snap_cpg)
raise exception.InvalidInput(reason)
# Check to make sure CPGs are in the same domain
domain = self.get_domain(old_cpg)
if domain != self.get_domain(new_cpg):
reason = (_('Cannot retype to a CPG in a different domain.'))
raise exception.Invalid3PARDomain(reason)
if domain != self.get_domain(new_snap_cpg):
reason = (_('Cannot retype to a snap CPG in a different domain.'))
raise exception.Invalid3PARDomain(reason)
def _retype(self, volume, volume_name, new_type_name, new_type_id, host,
new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg,
old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_vvs, new_vvs, old_qos, new_qos,
old_flash_cache, new_flash_cache,
old_comment):
action = "volume:retype"
self._retype_pre_checks(volume, host, new_persona,
old_cpg, new_cpg,
new_snap_cpg)
flow_name = action.replace(":", "_") + "_api"
retype_flow = linear_flow.Flow(flow_name)
# Keep this linear and do the big tunevv last. Everything leading
# up to that is reversible, but we'd let the 3PAR deal with tunevv
# errors on its own.
retype_flow.add(
ModifyVolumeTask(action),
ModifySpecsTask(action),
TuneVolumeTask(action))
taskflow.engines.run(
retype_flow,
store={'common': self,
'volume_name': volume_name, 'volume': volume,
'old_tpvv': old_tpvv, 'new_tpvv': new_tpvv,
'old_tdvv': old_tdvv, 'new_tdvv': new_tdvv,
'old_cpg': old_cpg, 'new_cpg': new_cpg,
'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg,
'old_vvs': old_vvs, 'new_vvs': new_vvs,
'old_qos': old_qos, 'new_qos': new_qos,
'old_flash_cache': old_flash_cache,
'new_flash_cache': new_flash_cache,
'new_type_name': new_type_name, 'new_type_id': new_type_id,
'old_comment': old_comment
})
def _retype_from_old_to_new(self, volume, new_type, old_volume_settings,
host):
"""Convert the volume to be of the new type. Given old type settings.
Returns True if the retype was successful.
Uses taskflow to revert changes if errors occur.
:param volume: A dictionary describing the volume to retype
:param new_type: A dictionary describing the volume type to convert to
:param old_volume_settings: Volume settings describing the old type.
:param host: A dictionary describing the host, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities. Host validation
is just skipped if host is None.
"""
volume_id = volume['id']
volume_name = self._get_3par_vol_name(volume_id)
new_type_name = None
new_type_id = None
if new_type:
new_type_name = new_type['name']
new_type_id = new_type['id']
pool = None
if host:
pool = volume_utils.extract_host(host['host'], 'pool')
else:
pool = volume_utils.extract_host(volume['host'], 'pool')
new_volume_settings = self.get_volume_settings_from_type_id(
new_type_id, pool)
new_cpg = new_volume_settings['cpg']
new_snap_cpg = new_volume_settings['snap_cpg']
new_tpvv = new_volume_settings['tpvv']
new_tdvv = new_volume_settings['tdvv']
new_qos = new_volume_settings['qos']
new_vvs = new_volume_settings['vvs_name']
new_persona = None
new_hpe3par_keys = new_volume_settings['hpe3par_keys']
if 'persona' in new_hpe3par_keys:
new_persona = new_hpe3par_keys['persona']
new_flash_cache = self.get_flash_cache_policy(new_hpe3par_keys)
old_qos = old_volume_settings['qos']
old_vvs = old_volume_settings['vvs_name']
old_hpe3par_keys = old_volume_settings['hpe3par_keys']
old_flash_cache = self.get_flash_cache_policy(old_hpe3par_keys)
# Get the current volume info because we can get in a bad state
# if we trust that all the volume type settings are still the
# same settings that were used with this volume.
old_volume_info = self.client.getVolume(volume_name)
old_tpvv = old_volume_info['provisioningType'] == self.THIN
old_tdvv = old_volume_info['provisioningType'] == self.DEDUP
old_cpg = old_volume_info['userCPG']
old_comment = old_volume_info['comment']
old_snap_cpg = None
if 'snapCPG' in old_volume_info:
old_snap_cpg = old_volume_info['snapCPG']
LOG.debug("retype old_volume_info=%s", old_volume_info)
LOG.debug("retype old_volume_settings=%s", old_volume_settings)
LOG.debug("retype new_volume_settings=%s", new_volume_settings)
self._retype(volume, volume_name, new_type_name, new_type_id,
host, new_persona, old_cpg, new_cpg,
old_snap_cpg, new_snap_cpg, old_tpvv, new_tpvv,
old_tdvv, new_tdvv, old_vvs, new_vvs,
old_qos, new_qos, old_flash_cache, new_flash_cache,
old_comment)
if host:
return True, self._get_model_update(host['host'], new_cpg)
else:
return True, self._get_model_update(volume['host'], new_cpg)
def _retype_from_no_type(self, volume, new_type):
"""Convert the volume to be of the new type. Starting from no type.
Returns True if the retype was successful.
Uses taskflow to revert changes if errors occur.
        :param volume: A dictionary describing the volume to retype. The
                       volume's own volume type is not used here; None is used.
:param new_type: A dictionary describing the volume type to convert to
"""
pool = volume_utils.extract_host(volume['host'], 'pool')
none_type_settings = self.get_volume_settings_from_type_id(None, pool)
return self._retype_from_old_to_new(volume, new_type,
none_type_settings, None)
def retype(self, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns True if the retype was successful.
Uses taskflow to revert changes if errors occur.
:param volume: A dictionary describing the volume to retype
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities. Host validation
is just skipped if host is None.
"""
LOG.debug(("enter: retype: id=%(id)s, new_type=%(new_type)s,"
"diff=%(diff)s, host=%(host)s"), {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
old_volume_settings = self.get_volume_settings_from_type(volume, host)
return self._retype_from_old_to_new(volume, new_type,
old_volume_settings, host)
def find_existing_vlun(self, volume, host):
"""Finds an existing VLUN for a volume on a host.
Returns an existing VLUN's information. If no existing VLUN is found,
None is returned.
:param volume: A dictionary describing a volume.
:param host: A dictionary describing a host.
"""
existing_vlun = None
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
# The first existing VLUN found will be returned.
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vlun = vlun
break
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
pass
return existing_vlun
def find_existing_vluns(self, volume, host):
existing_vluns = []
try:
vol_name = self._get_3par_vol_name(volume['id'])
host_vluns = self.client.getHostVLUNs(host['name'])
for vlun in host_vluns:
if vlun['volumeName'] == vol_name:
existing_vluns.append(vlun)
except hpeexceptions.HTTPNotFound:
# ignore, no existing VLUNs were found
LOG.debug("No existing VLUNs were found for host/volume "
"combination: %(host)s, %(vol)s",
{'host': host['name'],
'vol': vol_name})
pass
return existing_vluns
# v2 replication methods
def failover_host(self, context, volumes, secondary_backend_id):
"""Force failover to a secondary replication target."""
# Ensure replication is enabled before we try and failover.
if not self._replication_enabled:
msg = _LE("Issuing a fail-over failed because replication is "
"not properly configured.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Check to see if the user requested to failback.
if (secondary_backend_id and
secondary_backend_id == self.FAILBACK_VALUE):
volume_update_list = self._replication_failback(volumes)
target_id = None
else:
# Find the failover target.
failover_target = None
for target in self._replication_targets:
if target['backend_id'] == secondary_backend_id:
failover_target = target
break
if not failover_target:
msg = _("A valid secondary target MUST be specified in order "
"to failover.")
LOG.error(msg)
raise exception.InvalidReplicationTarget(reason=msg)
target_id = failover_target['backend_id']
# For each volume, if it is replicated, we want to fail it over.
volume_update_list = []
for volume in volumes:
if self._volume_of_replicated_type(volume):
try:
# Try and stop remote-copy on main array. We eat the
# exception here because when an array goes down, the
# groups will stop automatically.
rcg_name = self._get_3par_rcg_name(volume['id'])
self.client.stopRemoteCopy(rcg_name)
except Exception:
pass
try:
# Failover to secondary array.
remote_rcg_name = self._get_3par_remote_rcg_name(
volume['id'], volume['provider_location'])
cl = self._create_replication_client(failover_target)
cl.recoverRemoteCopyGroupFromDisaster(
remote_rcg_name, self.RC_ACTION_CHANGE_TO_PRIMARY)
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'replication_status': 'failed-over'}})
except Exception as ex:
msg = (_LE("There was a problem with the failover "
"(%(error)s) and it was unsuccessful. "
"Volume '%(volume)s will not be available "
"on the failed over target."),
{'error': six.text_type(ex),
'volume': volume['id']})
LOG.error(msg)
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'replication_status': 'error'}})
finally:
self._destroy_replication_client(cl)
else:
# If the volume is not of replicated type, we need to
# force the status into error state so a user knows they
# do not have access to the volume.
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'status': 'error'}})
return target_id, volume_update_list
def _replication_failback(self, volumes):
# Make sure the proper steps on the backend have been completed before
# we allow a fail-over.
if not self._is_host_ready_for_failback(volumes):
msg = _("The host is not ready to be failed back. Please "
"resynchronize the volumes and resume replication on the "
"3PAR backends.")
LOG.error(msg)
raise exception.InvalidReplicationTarget(reason=msg)
# Update the volumes status to available.
volume_update_list = []
for volume in volumes:
if self._volume_of_replicated_type(volume):
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'replication_status': 'available'}})
else:
# Upon failing back, we can move the non-replicated volumes
# back into available state.
volume_update_list.append(
{'volume_id': volume['id'],
'updates': {'status': 'available'}})
return volume_update_list
def _is_host_ready_for_failback(self, volumes):
"""Checks to make sure the volume has been synchronized
This ensures that all the remote copy targets have been restored
to their natural direction, and all of the volumes have been
fully synchronized.
"""
try:
for volume in volumes:
if self._volume_of_replicated_type(volume):
location = volume.get('provider_location')
remote_rcg_name = self._get_3par_remote_rcg_name(
volume['id'],
location)
rcg = self.client.getRemoteCopyGroup(remote_rcg_name)
# Make sure all targets are in their natural direction.
targets = rcg['targets']
for target in targets:
if target['roleReversed'] or (
target['state'] != self.RC_GROUP_STARTED):
return False
# Make sure all volumes are fully synced.
volumes = rcg['volumes']
for volume in volumes:
remote_volumes = volume['remoteVolumes']
for remote_volume in remote_volumes:
if remote_volume['syncStatus'] != (
self.SYNC_STATUS_COMPLETED):
return False
except Exception:
# If there was a problem, we will return false so we can
# log an error in the parent function.
return False
return True
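    # _do_replication_setup() reads the replication_device entries from the
    # cinder configuration; each entry is expected to provide at least
    # backend_id, replication_mode, cpg_map and the hpe3par_*/san_*
    # credentials (see _is_valid_replication_array() for the full list).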
def _do_replication_setup(self):
replication_targets = []
replication_devices = self.config.replication_device
if replication_devices:
for dev in replication_devices:
remote_array = dict(dev.items())
# Override and set defaults for certain entries
remote_array['managed_backend_name'] = (
dev.get('managed_backend_name'))
remote_array['replication_mode'] = (
self._get_remote_copy_mode_num(
dev.get('replication_mode')))
remote_array['san_ssh_port'] = (
dev.get('san_ssh_port', self.config.san_ssh_port))
remote_array['ssh_conn_timeout'] = (
dev.get('ssh_conn_timeout', self.config.ssh_conn_timeout))
remote_array['san_private_key'] = (
dev.get('san_private_key', self.config.san_private_key))
# Format iscsi IPs correctly
iscsi_ips = dev.get('hpe3par_iscsi_ips')
if iscsi_ips:
remote_array['hpe3par_iscsi_ips'] = iscsi_ips.split(' ')
# Format hpe3par_iscsi_chap_enabled as a bool
remote_array['hpe3par_iscsi_chap_enabled'] = (
dev.get('hpe3par_iscsi_chap_enabled') == 'True')
array_name = remote_array['backend_id']
# Make sure we can log into the array, that it has been
# correctly configured, and its API version meets the
# minimum requirement.
cl = None
try:
cl = self._create_replication_client(remote_array)
array_id = six.text_type(cl.getStorageSystemInfo()['id'])
remote_array['id'] = array_id
wsapi_version = cl.getWsApiVersion()['build']
if wsapi_version < REMOTE_COPY_API_VERSION:
msg = (_LW("The secondary array must have an API "
"version of %(min_ver)s or higher. Array "
"'%(target)s' is on %(target_ver)s, "
"therefore it will not be added as a valid "
"replication target.") %
{'target': array_name,
'min_ver': REMOTE_COPY_API_VERSION,
'target_ver': wsapi_version})
LOG.warning(msg)
elif not self._is_valid_replication_array(remote_array):
msg = (_LW("'%s' is not a valid replication array. "
"In order to be valid, backend_id, "
"replication_mode, "
"hpe3par_api_url, hpe3par_username, "
"hpe3par_password, cpg_map, san_ip, "
"san_login, and san_password "
"must be specified. If the target is "
"managed, managed_backend_name must be set "
"as well.") % array_name)
LOG.warning(msg)
else:
replication_targets.append(remote_array)
except Exception:
msg = (_LE("Could not log in to 3PAR array (%s) with the "
"provided credentials.") % array_name)
LOG.error(msg)
finally:
self._destroy_replication_client(cl)
self._replication_targets = replication_targets
if self._is_replication_configured_correct():
self._replication_enabled = True
def _is_valid_replication_array(self, target):
required_flags = ['hpe3par_api_url', 'hpe3par_username',
'hpe3par_password', 'san_ip', 'san_login',
'san_password', 'backend_id',
'replication_mode', 'cpg_map']
try:
self.check_replication_flags(target, required_flags)
return True
except Exception:
return False
def _is_replication_configured_correct(self):
rep_flag = True
# Make sure there is at least one replication target.
if len(self._replication_targets) < 1:
LOG.error(_LE("There must be at least one valid replication "
"device configured."))
rep_flag = False
return rep_flag
def _is_replication_mode_correct(self, mode, sync_num):
rep_flag = True
# Make sure replication_mode is set to either sync|periodic.
mode = self._get_remote_copy_mode_num(mode)
if not mode:
LOG.error(_LE("Extra spec replication:mode must be set and must "
"be either 'sync' or 'periodic'."))
rep_flag = False
else:
# If replication:mode is periodic, replication_sync_period must be
# set between 300 - 31622400 seconds.
if mode == self.PERIODIC and (
sync_num < 300 or sync_num > 31622400):
LOG.error(_LE("Extra spec replication:sync_period must be "
"greater than 299 and less than 31622401 "
"seconds."))
rep_flag = False
return rep_flag
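    # A volume type is treated as replicated only when its extra specs set
    # replication_enabled to the exact string "<is> True".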
def _volume_of_replicated_type(self, volume):
replicated_type = False
volume_type_id = volume.get('volume_type_id')
if volume_type_id:
volume_type = self._get_volume_type(volume_type_id)
extra_specs = volume_type.get('extra_specs')
if extra_specs and 'replication_enabled' in extra_specs:
rep_val = extra_specs['replication_enabled']
replicated_type = (rep_val == "<is> True")
return replicated_type
def _is_volume_in_remote_copy_group(self, volume):
rcg_name = self._get_3par_rcg_name(volume['id'])
try:
self.client.getRemoteCopyGroup(rcg_name)
return True
except hpeexceptions.HTTPNotFound:
return False
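    # _get_remote_copy_mode_num() maps the replication mode string to the
    # driver constants: "sync" -> self.SYNC, "periodic" -> self.PERIODIC,
    # anything else -> None.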
def _get_remote_copy_mode_num(self, mode):
ret_mode = None
if mode == "sync":
ret_mode = self.SYNC
if mode == "periodic":
ret_mode = self.PERIODIC
return ret_mode
def _get_3par_config(self):
self._do_replication_setup()
conf = None
if self._replication_enabled:
for target in self._replication_targets:
if target['backend_id'] == self._active_backend_id:
conf = target
break
self._build_3par_config(conf)
def _build_3par_config(self, conf=None):
"""Build 3PAR client config dictionary.
self._client_conf will contain values from self.config if the volume
is located on the primary array in order to properly contact it. If
the volume has been failed over and therefore on a secondary array,
self._client_conf will contain values on how to contact that array.
The only time we will return with entries from a secondary array is
with unmanaged replication.
"""
if conf:
self._client_conf['hpe3par_cpg'] = self._generate_hpe3par_cpgs(
conf.get('cpg_map'))
self._client_conf['hpe3par_username'] = (
conf.get('hpe3par_username'))
self._client_conf['hpe3par_password'] = (
conf.get('hpe3par_password'))
self._client_conf['san_ip'] = conf.get('san_ip')
self._client_conf['san_login'] = conf.get('san_login')
self._client_conf['san_password'] = conf.get('san_password')
self._client_conf['san_ssh_port'] = conf.get('san_ssh_port')
self._client_conf['ssh_conn_timeout'] = (
conf.get('ssh_conn_timeout'))
self._client_conf['san_private_key'] = conf.get('san_private_key')
self._client_conf['hpe3par_api_url'] = conf.get('hpe3par_api_url')
self._client_conf['hpe3par_iscsi_ips'] = (
conf.get('hpe3par_iscsi_ips'))
self._client_conf['hpe3par_iscsi_chap_enabled'] = (
conf.get('hpe3par_iscsi_chap_enabled'))
self._client_conf['iscsi_ip_address'] = (
conf.get('iscsi_ip_address'))
self._client_conf['iscsi_port'] = conf.get('iscsi_port')
else:
self._client_conf['hpe3par_cpg'] = (
self.config.hpe3par_cpg)
self._client_conf['hpe3par_username'] = (
self.config.hpe3par_username)
self._client_conf['hpe3par_password'] = (
self.config.hpe3par_password)
self._client_conf['san_ip'] = self.config.san_ip
self._client_conf['san_login'] = self.config.san_login
self._client_conf['san_password'] = self.config.san_password
self._client_conf['san_ssh_port'] = self.config.san_ssh_port
self._client_conf['ssh_conn_timeout'] = (
self.config.ssh_conn_timeout)
self._client_conf['san_private_key'] = self.config.san_private_key
self._client_conf['hpe3par_api_url'] = self.config.hpe3par_api_url
self._client_conf['hpe3par_iscsi_ips'] = (
self.config.hpe3par_iscsi_ips)
self._client_conf['hpe3par_iscsi_chap_enabled'] = (
self.config.hpe3par_iscsi_chap_enabled)
self._client_conf['iscsi_ip_address'] = (
self.config.iscsi_ip_address)
self._client_conf['iscsi_port'] = self.config.iscsi_port
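    # cpg_map is assumed to be a space-separated list of "localCPG:remoteCPG"
    # pairs, e.g. "SRC_CPG1:DST_CPG1 SRC_CPG2:DST_CPG2" (illustrative names).
    # The helpers below look up the remote CPG for a local CPG and collect
    # the list of remote CPGs.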
def _get_cpg_from_cpg_map(self, cpg_map, target_cpg):
ret_target_cpg = None
cpg_pairs = cpg_map.split(' ')
for cpg_pair in cpg_pairs:
cpgs = cpg_pair.split(':')
cpg = cpgs[0]
dest_cpg = cpgs[1]
if cpg == target_cpg:
ret_target_cpg = dest_cpg
return ret_target_cpg
def _generate_hpe3par_cpgs(self, cpg_map):
hpe3par_cpgs = []
cpg_pairs = cpg_map.split(' ')
for cpg_pair in cpg_pairs:
cpgs = cpg_pair.split(':')
hpe3par_cpgs.append(cpgs[1])
return hpe3par_cpgs
def _get_replication_targets(self):
replication_targets = []
for target in self._replication_targets:
replication_targets.append(target['backend_id'])
return replication_targets
def _do_volume_replication_setup(self, volume):
"""This function will do or ensure the following:
-Create volume on main array (already done in create_volume)
-Create Remote Copy Group on main array
-Add volume to Remote Copy Group on main array
-Start remote copy
If anything here fails, we will need to clean everything up in
reverse order, including the original volume.
"""
rcg_name = self._get_3par_rcg_name(volume['id'])
# If the volume is already in a remote copy group, return True
# after starting remote copy. If remote copy is already started,
# issuing this command again will be fine.
if self._is_volume_in_remote_copy_group(volume):
try:
self.client.startRemoteCopy(rcg_name)
except Exception:
pass
return True
try:
# Grab the extra_spec entries for replication and make sure they
# are set correctly.
volume_type = self._get_volume_type(volume["volume_type_id"])
extra_specs = volume_type.get("extra_specs")
replication_mode = extra_specs.get(
self.EXTRA_SPEC_REP_MODE, self.DEFAULT_REP_MODE)
replication_mode_num = self._get_remote_copy_mode_num(
replication_mode)
replication_sync_period = extra_specs.get(
self.EXTRA_SPEC_REP_SYNC_PERIOD, self.DEFAULT_SYNC_PERIOD)
if replication_sync_period:
replication_sync_period = int(replication_sync_period)
if not self._is_replication_mode_correct(replication_mode,
replication_sync_period):
msg = _("The replication mode was not configured correctly "
"in the volume type extra_specs. If replication:mode "
"is periodic, replication:sync_period must also be "
"specified and be between 300 and 31622400 seconds.")
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
vol_settings = self.get_volume_settings_from_type(volume)
local_cpg = vol_settings['cpg']
vol_name = self._get_3par_vol_name(volume['id'])
# Create remote copy group on main array.
rcg_targets = []
sync_targets = []
for target in self._replication_targets:
# Only add targets that match the volumes replication mode.
if target['replication_mode'] == replication_mode_num:
cpg = self._get_cpg_from_cpg_map(target['cpg_map'],
local_cpg)
rcg_target = {'targetName': target['backend_id'],
'mode': replication_mode_num,
'snapCPG': cpg,
'userCPG': cpg}
rcg_targets.append(rcg_target)
sync_target = {'targetName': target['backend_id'],
'syncPeriod': replication_sync_period}
sync_targets.append(sync_target)
optional = {'localSnapCPG': vol_settings['snap_cpg'],
'localUserCPG': local_cpg}
pool = volume_utils.extract_host(volume['host'], level='pool')
domain = self.get_domain(pool)
if domain:
optional["domain"] = domain
try:
self.client.createRemoteCopyGroup(rcg_name, rcg_targets,
optional)
except Exception as ex:
msg = (_("There was an error creating the remote copy "
"group: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Add volume to remote copy group.
rcg_targets = []
for target in self._replication_targets:
# Only add targets that match the volumes replication mode.
if target['replication_mode'] == replication_mode_num:
rcg_target = {'targetName': target['backend_id'],
'secVolumeName': vol_name}
rcg_targets.append(rcg_target)
optional = {'volumeAutoCreation': True}
try:
self.client.addVolumeToRemoteCopyGroup(rcg_name, vol_name,
rcg_targets,
optional=optional)
except Exception as ex:
msg = (_("There was an error adding the volume to the remote "
"copy group: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Check and see if we are in periodic mode. If we are, update
# Remote Copy Group to have a sync period.
if replication_sync_period and (
replication_mode_num == self.PERIODIC):
opt = {'targets': sync_targets}
try:
self.client.modifyRemoteCopyGroup(rcg_name, opt)
except Exception as ex:
msg = (_("There was an error setting the sync period for "
"the remote copy group: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
# Start the remote copy.
try:
self.client.startRemoteCopy(rcg_name)
except Exception as ex:
msg = (_("There was an error starting remote copy: %s.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return True
except Exception as ex:
self._do_volume_replication_destroy(volume)
msg = (_("There was an error setting up a remote copy group "
"on the 3PAR arrays: ('%s'). The volume will not be "
"recognized as replication type.") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _do_volume_replication_destroy(self, volume, rcg_name=None):
"""This will completely remove all traces of a remote copy group.
It should be used when deleting a replication enabled volume
or if setting up a remote copy group fails. It will try and do the
following:
-Stop remote copy
-Remove volume from Remote Copy Group on main array
-Delete Remote Copy Group from main array
-Delete volume from main array
"""
if not rcg_name:
rcg_name = self._get_3par_rcg_name(volume['id'])
vol_name = self._get_3par_vol_name(volume['id'])
# Stop remote copy.
try:
self.client.stopRemoteCopy(rcg_name)
except Exception:
pass
# Delete volume from remote copy group on main array.
try:
self.client.removeVolumeFromRemoteCopyGroup(
rcg_name, vol_name, removeFromTarget=True)
except Exception:
pass
# Delete remote copy group on main array.
try:
self.client.removeRemoteCopyGroup(rcg_name)
except Exception:
pass
# Delete volume on the main array.
try:
self.client.deleteVolume(vol_name)
except Exception:
pass
def _delete_replicated_failed_over_volume(self, volume):
location = volume.get('provider_location')
rcg_name = self._get_3par_remote_rcg_name(volume['id'], location)
targets = self.client.getRemoteCopyGroup(rcg_name)['targets']
# When failed over, we want to temporarily disable config mirroring
# in order to be allowed to delete the volume and remote copy group
for target in targets:
target_name = target['targetName']
self.client.toggleRemoteCopyConfigMirror(target_name,
mirror_config=False)
# Do regular volume replication destroy now config mirroring is off
try:
self._do_volume_replication_destroy(volume, rcg_name)
except Exception as ex:
msg = (_("The failed-over volume could not be deleted: %s") %
six.text_type(ex))
LOG.error(msg)
raise exception.VolumeIsBusy(message=msg)
finally:
# Turn config mirroring back on
for target in targets:
target_name = target['targetName']
self.client.toggleRemoteCopyConfigMirror(target_name,
mirror_config=True)
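# Illustrative (hypothetical) use of TaskWaiter, mirroring tune_vv() above:
#     status = self.TaskWaiter(self.client, task_id).wait_for_task()
#     if status['status'] != self.client.TASK_DONE:
#         ...  # the task stopped without completing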
class TaskWaiter(object):
"""TaskWaiter waits for task to be not active and returns status."""
def __init__(self, client, task_id, interval=1, initial_delay=0):
self.client = client
self.task_id = task_id
self.interval = interval
self.initial_delay = initial_delay
def _wait_for_task(self):
status = self.client.getTask(self.task_id)
LOG.debug("3PAR Task id %(id)s status = %(status)s",
{'id': self.task_id,
'status': status['status']})
        if status['status'] != self.client.TASK_ACTIVE:
raise loopingcall.LoopingCallDone(status)
def wait_for_task(self):
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_task)
return timer.start(interval=self.interval,
initial_delay=self.initial_delay).wait()
class ModifyVolumeTask(flow_utils.CinderTask):
"""Task to change a volume's snapCPG and comment.
This is a task for changing the snapCPG and comment. It is intended for
use during retype(). These changes are done together with a single
modify request which should be fast and easy to revert.
Because we do not support retype with existing snapshots, we can change
the snapCPG without using a keepVV. If snapshots exist, then this will
fail, as desired.
This task does not change the userCPG or provisioningType. Those changes
may require tunevv, so they are done by the TuneVolumeTask.
The new comment will contain the new type, VVS and QOS information along
with whatever else was in the old comment dict.
The old comment and snapCPG are restored if revert is called.
"""
def __init__(self, action):
self.needs_revert = False
super(ModifyVolumeTask, self).__init__(addons=[action])
def _get_new_comment(self, old_comment, new_vvs, new_qos,
new_type_name, new_type_id):
# Modify the comment during ModifyVolume
comment_dict = dict(ast.literal_eval(old_comment))
if 'vvs' in comment_dict:
del comment_dict['vvs']
if 'qos' in comment_dict:
del comment_dict['qos']
if new_vvs:
comment_dict['vvs'] = new_vvs
elif new_qos:
comment_dict['qos'] = new_qos
else:
comment_dict['qos'] = {}
if new_type_name:
comment_dict['volume_type_name'] = new_type_name
else:
comment_dict.pop('volume_type_name', None)
if new_type_id:
comment_dict['volume_type_id'] = new_type_id
else:
comment_dict.pop('volume_type_id', None)
return comment_dict
def execute(self, common, volume_name, old_snap_cpg, new_snap_cpg,
old_comment, new_vvs, new_qos, new_type_name, new_type_id):
comment_dict = self._get_new_comment(
old_comment, new_vvs, new_qos, new_type_name, new_type_id)
if new_snap_cpg != old_snap_cpg:
# Modify the snap_cpg. This will fail with snapshots.
LOG.info(_LI("Modifying %(volume_name)s snap_cpg from "
"%(old_snap_cpg)s to %(new_snap_cpg)s."),
{'volume_name': volume_name,
'old_snap_cpg': old_snap_cpg,
'new_snap_cpg': new_snap_cpg})
common.client.modifyVolume(
volume_name,
{'snapCPG': new_snap_cpg,
'comment': json.dumps(comment_dict)})
self.needs_revert = True
else:
LOG.info(_LI("Modifying %s comments."), volume_name)
common.client.modifyVolume(
volume_name,
{'comment': json.dumps(comment_dict)})
self.needs_revert = True
def revert(self, common, volume_name, old_snap_cpg, new_snap_cpg,
old_comment, **kwargs):
if self.needs_revert:
LOG.info(_LI("Retype revert %(volume_name)s snap_cpg from "
"%(new_snap_cpg)s back to %(old_snap_cpg)s."),
{'volume_name': volume_name,
'new_snap_cpg': new_snap_cpg,
'old_snap_cpg': old_snap_cpg})
try:
common.client.modifyVolume(
volume_name,
{'snapCPG': old_snap_cpg, 'comment': old_comment})
except Exception as ex:
LOG.error(_LE("Exception during snapCPG revert: %s"), ex)
class TuneVolumeTask(flow_utils.CinderTask):
"""Task to change a volume's CPG and/or provisioning type.
This is a task for changing the CPG and/or provisioning type. It is
intended for use during retype(). This task has no revert. The current
design is to do this task last and do revert-able tasks first. Un-doing a
tunevv can be expensive and should be avoided.
"""
def __init__(self, action, **kwargs):
super(TuneVolumeTask, self).__init__(addons=[action])
def execute(self, common, old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_cpg, new_cpg, volume_name):
common.tune_vv(old_tpvv, new_tpvv, old_tdvv, new_tdvv,
old_cpg, new_cpg, volume_name)
class ModifySpecsTask(flow_utils.CinderTask):
"""Set/unset the QOS settings and/or VV set for the volume's new type.
This is a task for changing the QOS settings and/or VV set. It is intended
for use during retype(). If changes are made during execute(), then they
need to be undone if revert() is called (i.e., if a later task fails).
For 3PAR, we ignore QOS settings if a VVS is explicitly set, otherwise we
create a VV set and use that for QOS settings. That is why they are lumped
together here. Most of the decision-making about VVS vs. QOS settings vs.
old-style scoped extra-specs is handled in existing reusable code. Here
we mainly need to know what old stuff to remove before calling the function
that knows how to set the new stuff.
Basic task flow is as follows: Remove the volume from the old externally
created VVS (when appropriate), delete the old cinder-created VVS, call
the function that knows how to set a new VVS or QOS settings.
If any changes are made during execute, then revert needs to reverse them.
"""
def __init__(self, action):
self.needs_revert = False
super(ModifySpecsTask, self).__init__(addons=[action])
def execute(self, common, volume_name, volume, old_cpg, new_cpg,
old_vvs, new_vvs, old_qos, new_qos,
old_flash_cache, new_flash_cache):
if (old_vvs != new_vvs or
old_qos != new_qos or
old_flash_cache != new_flash_cache):
# Remove VV from old VV Set.
if old_vvs is not None and old_vvs != new_vvs:
common.client.removeVolumeFromVolumeSet(old_vvs,
volume_name)
self.needs_revert = True
# If any extra or qos specs changed then remove the old
# special VV set that we create. We'll recreate it
# as needed.
vvs_name = common._get_3par_vvs_name(volume['id'])
try:
common.client.deleteVolumeSet(vvs_name)
self.needs_revert = True
except hpeexceptions.HTTPNotFound as ex:
# HTTPNotFound(code=102) is OK. Set does not exist.
if ex.get_code() != 102:
LOG.error(_LE("Unexpected error when retype() tried to "
"deleteVolumeSet(%s)"), vvs_name)
raise
if new_vvs or new_qos or new_flash_cache:
common._add_volume_to_volume_set(
volume, volume_name, new_cpg, new_vvs, new_qos,
new_flash_cache)
self.needs_revert = True
def revert(self, common, volume_name, volume, old_vvs, new_vvs, old_qos,
old_cpg, **kwargs):
if self.needs_revert:
# If any extra or qos specs changed then remove the old
# special VV set that we create and recreate it per
# the old type specs.
vvs_name = common._get_3par_vvs_name(volume['id'])
try:
common.client.deleteVolumeSet(vvs_name)
except hpeexceptions.HTTPNotFound as ex:
# HTTPNotFound(code=102) is OK. Set does not exist.
if ex.get_code() != 102:
LOG.error(_LE("Unexpected error when retype() revert "
"tried to deleteVolumeSet(%s)"), vvs_name)
except Exception:
LOG.error(_LE("Unexpected error when retype() revert "
"tried to deleteVolumeSet(%s)"), vvs_name)
if old_vvs is not None or old_qos is not None:
try:
common._add_volume_to_volume_set(
volume, volume_name, old_cpg, old_vvs, old_qos)
except Exception as ex:
LOG.error(_LE("%(exception)s: Exception during revert of "
"retype for volume %(volume_name)s. "
"Original volume set/QOS settings may not "
"have been fully restored."),
{'exception': ex, 'volume_name': volume_name})
if new_vvs is not None and old_vvs != new_vvs:
try:
common.client.removeVolumeFromVolumeSet(
new_vvs, volume_name)
except Exception as ex:
LOG.error(_LE("%(exception)s: Exception during revert of "
"retype for volume %(volume_name)s. "
"Failed to remove from new volume set "
"%(new_vvs)s."),
{'exception': ex,
'volume_name': volume_name,
'new_vvs': new_vvs})
| apache-2.0 |