repo_name | path | copies | size | content | license
---|---|---|---|---|---
yl565/statsmodels | statsmodels/tsa/statespace/tests/results/results_structural.py | 7 | 7399 |
"""
Results for unobserved components (structural) tests
Results from R, KFAS library using script `test_ucm.R`.
See also Stata time series documentation.
Author: Chad Fulton
License: Simplified-BSD
"""
from numpy import pi
irregular = {
'models': [
{'irregular': True},
{'level': 'irregular'},
{'level': 'ntrend'},
],
'params': [36.74687342],
'llf': -653.8562525,
'kwargs': {}
}
# This model will issue a warning that there is no stochastic component and
# will then add an irregular component. Thus its output will be just like
# the "deterministic constant" model.
fixed_intercept = {
'models': [
{'level': True},
{'level': 'fixed intercept'},
],
'params': [2.127438969],
'llf': -365.5289923,
'kwargs': {}
}
deterministic_constant = {
'models': [
{'irregular': True, 'level': True},
{'level': 'deterministic constant'},
{'level': 'dconstant'},
],
'params': [2.127438969],
'llf': -365.5289923,
'kwargs': {}
}
local_level = {
'models': [
{'irregular': True, 'level': True, 'stochastic_level': True},
{'level': 'local level'},
{'level': 'llevel'}
],
'params': [4.256647886e-06, 1.182078808e-01],
'llf': -70.97242557,
'kwargs': {}
}
random_walk = {
'models': [
{'level': True, 'stochastic_level': True},
{'level': 'random walk'},
{'level': 'rwalk'},
],
'params': [0.1182174646],
'llf': -70.96771641,
'kwargs': {}
}
# This model will issue a warning that there is no stochastic component and
# will then add an irregular component. Thus its output will be just like
# the "deterministic trend" model.
fixed_slope = {
'models': [
{'level': True, 'trend': True},
{'level': 'fixed slope'},
],
'params': [2.134137554],
'llf': -370.7758666,
'kwargs': {}
}
deterministic_trend = {
'models': [
{'irregular': True, 'level': True, 'trend': True},
{'level': 'deterministic trend'},
{'level': 'dtrend'},
],
'params': [2.134137554],
'llf': -370.7758666,
'kwargs': {}
}
local_linear_deterministic_trend = {
'models': [
{'irregular': True, 'level': True, 'stochastic_level': True,
'trend': True},
{'level': 'local linear deterministic trend'},
{'level': 'lldtrend'},
],
'params': [4.457592057e-06, 1.184455029e-01],
'llf': -73.47291031,
'kwargs': {}
}
random_walk_with_drift = {
'models': [
{'level': True, 'stochastic_level': True, 'trend': True},
{'level': 'random walk with drift'},
{'level': 'rwdrift'},
],
'params': [0.1184499547],
'llf': -73.46798576,
'kwargs': {}
}
local_linear_trend = {
'models': [
{'irregular': True, 'level': True, 'stochastic_level': True,
'trend': True, 'stochastic_trend': True},
{'level': 'local linear trend'},
{'level': 'lltrend'}
],
'params': [1.339852549e-06, 1.008704925e-02, 6.091760810e-02],
'llf': -31.15640107,
'kwargs': {}
}
smooth_trend = {
'models': [
{'irregular': True, 'level': True, 'trend': True,
'stochastic_trend': True},
{'level': 'smooth trend'},
{'level': 'strend'},
],
'params': [0.0008824099119, 0.0753064234342],
'llf': -31.92261408,
'kwargs': {}
}
random_trend = {
'models': [
{'level': True, 'trend': True, 'stochastic_trend': True},
{'level': 'random trend'},
{'level': 'rtrend'},
],
'params': [0.08054724989],
'llf': -32.05607557,
'kwargs': {}
}
cycle = {
'models': [{'irregular': True, 'cycle': True, 'stochastic_cycle': True,
'damped_cycle': True}],
'params': [37.57197224, 0.1, 2*pi/10, 1],
'llf': -672.3102588,
'kwargs': {
        # Required due to the way KFAS estimates the loglikelihood when P1inf
        # is set in the R code
'loglikelihood_burn': 0
}
}
seasonal = {
'models': [{'irregular': True, 'seasonal': 4}],
'params': [38.1704278, 0.1],
'llf': -655.3337155,
'kwargs': {},
'rtol': 1e-6
}
reg = {
# Note: The test needs to fill in exog=np.log(dta['realgdp'])
'models': [
{'irregular': True, 'exog': True, 'mle_regression': False},
{'level': 'irregular', 'exog': True, 'mle_regression': False},
{'level': 'ntrend', 'exog': True, 'mle_regression': False},
{'level': 'ntrend', 'exog': 'numpy', 'mle_regression': False},
],
'params': [2.215447924],
'llf': -379.6233483,
'kwargs': {
        # Required due to the way KFAS estimates the loglikelihood when P1inf
        # is set in the R code
'loglikelihood_burn': 0
}
}
rtrend_ar1 = {
'models': [
{'level': True, 'trend': True, 'stochastic_trend': True,
'autoregressive': 1},
{'level': 'random trend', 'autoregressive': 1},
{'level': 'rtrend', 'autoregressive': 1}
],
'params': [0.0609, 0.0097, 0.9592],
'llf': -31.15629379,
'kwargs': {}
}
lltrend_cycle_seasonal_reg_ar1 = {
# Note: The test needs to fill in exog=np.log(dta['realgdp'])
'models': [
# Complete specification
{'irregular': True, 'level': True, 'stochastic_level': True,
'trend': True, 'stochastic_trend': True, 'cycle': True,
'stochastic_cycle': True, 'seasonal': 4, 'autoregressive': 1,
'exog': True, 'mle_regression': False},
        # Verbose string specification
        {'level': 'local linear trend', 'autoregressive': 1, 'cycle': True,
         'stochastic_cycle': True, 'seasonal': 4,
         'exog': True, 'mle_regression': False},
        # Abbreviated string specification
        {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
         'stochastic_cycle': True, 'seasonal': 4,
         'exog': True, 'mle_regression': False},
        # Numpy exog dataset
        {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
         'stochastic_cycle': True, 'seasonal': 4,
         'exog': 'numpy', 'mle_regression': False},
        # Annual frequency dataset
        {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
         'stochastic_cycle': True, 'seasonal': 4,
         'exog': True, 'mle_regression': False, 'freq': 'AS'},
        # Quarterly frequency dataset
        {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
         'stochastic_cycle': True, 'seasonal': 4,
         'exog': True, 'mle_regression': False, 'freq': 'QS'},
        # Monthly frequency dataset
        {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
         'stochastic_cycle': True, 'seasonal': 4,
         'exog': True, 'mle_regression': False, 'freq': 'MS'},
        # Minutely frequency dataset
        {'level': 'lltrend', 'autoregressive': 1, 'cycle': True,
         'stochastic_cycle': True, 'seasonal': 4,
         'exog': True, 'mle_regression': False, 'freq': 'T',
         'cycle_period_bounds': (1.5*12, 12*12)},
],
'params': [0.0001, 0.01, 0.06, 0.0001, 0.0001, 0.1, 2*pi / 10, 0.2],
'llf': -168.5258709,
'kwargs': {
        # Required due to the way KFAS estimates the loglikelihood when P1inf
        # is set in the R code
'loglikelihood_burn': 0
}
}
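# Illustrative sketch of how these dictionaries are typically consumed: each
# entry in 'models' is an equivalent specification of the same
# unobserved-components model, and a test fits each one and compares the
# loglikelihood against 'llf'. `endog` is a hypothetical placeholder for the
# macrodata series the actual tests load.
#
#     from statsmodels.tsa.statespace.structural import UnobservedComponents
#     for spec in local_level['models']:
#         kwargs = dict(local_level['kwargs'], **spec)
#         res = UnobservedComponents(endog, **kwargs).smooth(local_level['params'])
#         assert abs(res.llf - local_level['llf']) < 1e-4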
| bsd-3-clause
rickhurst/Django-non-rel-blog | django/contrib/localflavor/ar/forms.py | 309 | 3903 |
# -*- coding: utf-8 -*-
"""
AR-specific Form helpers.
"""
from django.forms import ValidationError
from django.core.validators import EMPTY_VALUES
from django.forms.fields import RegexField, CharField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
class ARProvinceSelect(Select):
"""
A Select widget that uses a list of Argentinean provinces/autonomous cities
as its choices.
"""
def __init__(self, attrs=None):
from ar_provinces import PROVINCE_CHOICES
super(ARProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ARPostalCodeField(RegexField):
"""
A field that accepts a 'classic' NNNN Postal Code or a CPA.
See http://www.correoargentino.com.ar/consulta_cpa/home.php
"""
default_error_messages = {
'invalid': _("Enter a postal code in the format NNNN or ANNNNAAA."),
}
def __init__(self, *args, **kwargs):
super(ARPostalCodeField, self).__init__(r'^\d{4}$|^[A-HJ-NP-Za-hj-np-z]\d{4}\D{3}$',
min_length=4, max_length=8, *args, **kwargs)
def clean(self, value):
value = super(ARPostalCodeField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if len(value) not in (4, 8):
raise ValidationError(self.error_messages['invalid'])
if len(value) == 8:
return u'%s%s%s' % (value[0].upper(), value[1:5], value[5:].upper())
return value
class ARDNIField(CharField):
"""
A field that validates 'Documento Nacional de Identidad' (DNI) numbers.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 7 or 8 digits."),
}
def __init__(self, *args, **kwargs):
super(ARDNIField, self).__init__(max_length=10, min_length=7, *args,
**kwargs)
def clean(self, value):
"""
Value can be a string either in the [X]X.XXX.XXX or [X]XXXXXXX formats.
"""
value = super(ARDNIField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
value = value.replace('.', '')
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) not in (7, 8):
raise ValidationError(self.error_messages['max_digits'])
return value
class ARCUITField(RegexField):
"""
This field validates a CUIT (Código Único de Identificación Tributaria). A
CUIT is of the form XX-XXXXXXXX-V. The last digit is a check digit.
"""
default_error_messages = {
'invalid': _('Enter a valid CUIT in XX-XXXXXXXX-X or XXXXXXXXXXXX format.'),
'checksum': _("Invalid CUIT."),
}
def __init__(self, *args, **kwargs):
super(ARCUITField, self).__init__(r'^\d{2}-?\d{8}-?\d$',
*args, **kwargs)
def clean(self, value):
"""
Value can be either a string in the format XX-XXXXXXXX-X or an
11-digit number.
"""
value = super(ARCUITField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value, cd = self._canon(value)
if self._calc_cd(value) != cd:
raise ValidationError(self.error_messages['checksum'])
return self._format(value, cd)
def _canon(self, cuit):
cuit = cuit.replace('-', '')
return cuit[:-1], cuit[-1]
def _calc_cd(self, cuit):
mults = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)
tmp = sum([m * int(cuit[idx]) for idx, m in enumerate(mults)])
return str(11 - tmp % 11)
def _format(self, cuit, check_digit=None):
        if check_digit is None:
check_digit = cuit[-1]
cuit = cuit[:-1]
return u'%s-%s-%s' % (cuit[:2], cuit[2:], check_digit)
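# Worked example of the check-digit arithmetic above (values are illustrative):
# for the body '2012345678' the weighted sum is
#   2*5 + 0*4 + 1*3 + 2*2 + 3*7 + 4*6 + 5*5 + 6*4 + 7*3 + 8*2 = 148,
# 148 % 11 = 5, and 11 - 5 = 6, so the check digit is '6' and the formatted
# value is '20-12345678-6'. A field instance would therefore accept:
#
#     field = ARCUITField()
#     field.clean(u'20-12345678-6')   # returns u'20-12345678-6'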
| bsd-3-clause
Mistobaan/tensorflow | tensorflow/contrib/learn/python/learn/datasets/load_csv_test.py | 137 | 1348 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.python.platform import test
class LoadCsvTest(test.TestCase):
"""Test load csv functions."""
def testIris(self):
iris = datasets.load_iris()
self.assertTupleEqual(iris.data.shape, (150, 4))
self.assertTupleEqual(iris.target.shape, (150,))
def testBoston(self):
boston = datasets.load_boston()
self.assertTupleEqual(boston.data.shape, (506, 13))
self.assertTupleEqual(boston.target.shape, (506,))
if __name__ == "__main__":
test.main()
| apache-2.0
sid-kap/pants | src/python/pants/console/stty_utils.py | 30 | 1090 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
from contextlib import contextmanager
@contextmanager
def preserve_stty_settings():
"""Run potentially stty-modifying operations, e.g., REPL execution, in this contextmanager."""
stty_settings = STTYSettings()
stty_settings.save_stty_options()
yield
    stty_settings.restore_stty_options()
class STTYSettings(object):
"""Saves/restores stty settings, e.g., during REPL execution."""
def __init__(self):
self._stty_options = None
def save_stty_options(self):
self._stty_options = self._run_cmd('stty -g 2>/dev/null')
    def restore_stty_options(self):
self._run_cmd('stty ' + self._stty_options)
def _run_cmd(self, cmd):
po = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, _ = po.communicate()
return stdout
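# Example usage (illustrative): wrap any operation that may mutate terminal
# modes, such as running a REPL, so the settings captured on entry are
# restored on exit. `run_repl()` is a hypothetical stty-modifying operation.
#
#     with preserve_stty_settings():
#         run_repl()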
| apache-2.0
vgrem/Office365-REST-Python-Client | office365/directory/objectIdentity.py | 1 | 1662 |
from office365.runtime.client_value import ClientValue
class ObjectIdentity(ClientValue):
"""
Represents an identity used to sign in to a user account.
"""
def __init__(self, signInType=None, issuer=None, issuerAssignedId=None):
"""
:param str signInType: Specifies the user sign-in types in your directory, such as emailAddress, userName
or federated. Here, federated represents a unique identifier for a user from an issuer, that can be in
any format chosen by the issuer. Additional validation is enforced on issuerAssignedId when the sign-in
type is set to emailAddress or userName. This property can also be set to any custom string.
:param str issuer: Specifies the issuer of the identity, for example facebook.com.
For local accounts (where signInType is not federated), this property is the local B2C tenant default
domain name, for example contoso.onmicrosoft.com.
        For external users from another Azure AD organization, this will be the domain of the federated organization,
for example contoso.com.
:param str issuerAssignedId: Specifies the unique identifier assigned to the user by the issuer.
The combination of issuer and issuerAssignedId must be unique within the organization. Represents
the sign-in name for the user, when signInType is set to emailAddress or userName
(also known as local accounts).
"""
super(ObjectIdentity, self).__init__()
self.signInType = signInType
self.issuer = issuer
self.issuerAssignedId = issuerAssignedId
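# Example (illustrative, with placeholder values): a local-account identity
# where the user signs in with an email address.
#
#     identity = ObjectIdentity(signInType="emailAddress",
#                               issuer="contoso.onmicrosoft.com",
#                               issuerAssignedId="[email protected]")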
| mit
caot/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/threadsafe.py | 110 | 2802 |
import threading
from django.contrib.gis.geos.libgeos import lgeos, notice_h, error_h, CONTEXT_PTR
class GEOSContextHandle(object):
"""
Python object representing a GEOS context handle.
"""
def __init__(self):
# Initializing the context handler for this thread with
# the notice and error handler.
self.ptr = lgeos.initGEOS_r(notice_h, error_h)
def __del__(self):
if self.ptr: lgeos.finishGEOS_r(self.ptr)
# Defining a thread-local object and creating an instance
# to hold a reference to GEOSContextHandle for this thread.
class GEOSContext(threading.local):
handle = None
thread_context = GEOSContext()
def call_geos_threaded(cfunc, args):
"""
This module-level routine calls the specified GEOS C thread-safe
function with the context for this current thread.
"""
# If a context handle does not exist for this thread, initialize one.
if not thread_context.handle:
thread_context.handle = GEOSContextHandle()
# Call the threaded GEOS routine with pointer of the context handle
# as the first argument.
return cfunc(thread_context.handle.ptr, *args)
class GEOSFunc(object):
"""
Class that serves as a wrapper for GEOS C Functions, and will
use thread-safe function variants when available.
"""
def __init__(self, func_name):
try:
# GEOS thread-safe function signatures end with '_r', and
# take an additional context handle parameter.
self.cfunc = getattr(lgeos, func_name + '_r')
self.threaded = True
except AttributeError:
# Otherwise, use usual function.
self.cfunc = getattr(lgeos, func_name)
self.threaded = False
def __call__(self, *args):
if self.threaded:
return call_geos_threaded(self.cfunc, args)
else:
return self.cfunc(*args)
def __str__(self):
return self.cfunc.__name__
# argtypes property
def _get_argtypes(self):
return self.cfunc.argtypes
def _set_argtypes(self, argtypes):
if self.threaded:
new_argtypes = [CONTEXT_PTR]
new_argtypes.extend(argtypes)
self.cfunc.argtypes = new_argtypes
else:
self.cfunc.argtypes = argtypes
argtypes = property(_get_argtypes, _set_argtypes)
# restype property
def _get_restype(self):
return self.cfunc.restype
def _set_restype(self, restype):
self.cfunc.restype = restype
restype = property(_get_restype, _set_restype)
# errcheck property
def _get_errcheck(self):
return self.cfunc.errcheck
def _set_errcheck(self, errcheck):
self.cfunc.errcheck = errcheck
errcheck = property(_get_errcheck, _set_errcheck)
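# Example (illustrative): wrapping a GEOS C function by name. GEOSGeomGetNumPoints
# is used only as a sample symbol; when the corresponding '_r' variant exists in
# the loaded library, calls are routed through call_geos_threaded with this
# thread's context handle prepended automatically.
#
#     get_num_points = GEOSFunc('GEOSGeomGetNumPoints')
#     # n = get_num_points(geom_ptr)  # geom_ptr: a hypothetical GEOS geometry pointer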
| apache-2.0
TheWardoctor/Wardoctors-repo | script.module.exodus/lib/resources/lib/sources/pl/cdahd.py | 5 | 6682 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2017 homik
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urlparse, re
from resources.lib.modules import cleantitle
from resources.lib.modules import client
class source:
def __init__(self):
self.priority = 1
self.language = ['pl']
self.domains = ['cda-hd.pl']
self.base_link = 'http://cda-hd.pl/'
self.search_link = '/?s=%s'
def do_search(self, title, local_title, year, video_type):
try:
url = urlparse.urljoin(self.base_link, self.search_link)
url = url % urllib.quote_plus(cleantitle.query(title))
result = client.request(url)
result = client.parseDOM(result, 'div', attrs={'class': 'item'})
for row in result:
row_type = client.parseDOM(row, 'div', attrs={'class': 'typepost'})[0]
if row_type != video_type:
continue
names = client.parseDOM(row, 'span', attrs={'class': 'tt'})[0]
names = names.split('/')
year_found = client.parseDOM(row, 'span', attrs={'class': 'year'})
titles = [cleantitle.get(i) for i in [title,local_title]]
if self.name_matches(names, titles, year) and (len(year_found) == 0 or year_found[0] == year):
url = client.parseDOM(row, 'a', ret='href')[0]
return urlparse.urljoin(self.base_link, url)
        except:
            return
def name_matches(self, names_found, titles, year):
for name in names_found:
name = name.strip().encode('utf-8')
# if ends with year
clean_found_title = cleantitle.get(name)
            # sometimes they add the year to the title, so we need to check for that
if clean_found_title in titles:
return True
return False
def get_first_not_none(self, collection):
return next(item for item in collection if item is not None)
def movie(self, imdb, title, localtitle, aliases, year):
return self.do_search(title, localtitle, year, 'Film')
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
return self.do_search(tvshowtitle, localtvshowtitle, year, 'Serial')
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
result = client.request(url)
            # can't use the DOM parser here because the HTML is buggy: a div is not closed
            result = re.findall('<ul class="episodios">(.*?)</ul>', result, re.MULTILINE | re.DOTALL)
            for item in result:
                season_episodes = re.findall('<li>(.*?)</li>', item, re.MULTILINE | re.DOTALL)
for row in season_episodes:
s = client.parseDOM(row, 'div', attrs={'class': 'numerando'})[0].split('x')
season_found = s[0].strip()
episode_found = s[1].strip()
                    if season_found != season:
                        break
                    if episode_found == episode:
                        return client.parseDOM(row, 'a', ret='href')[0]
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
result = client.request(url)
box_result = client.parseDOM(result, 'li', attrs={'class': 'elemento'})
            if len(box_result) != 0:
sources = self.get_links_from_box(box_result)
sources += self.get_from_main_player(result, sources)
return sources
except:
return sources
def get_from_main_player(self, result, sources):
q = 'SD'
if len(sources) == 0 and (len(client.parseDOM(result, 'span', attrs={'class': 'calidad2'})) > 0):
q = 'HD'
player2 = client.parseDOM(result, 'div', attrs={'id': 'player2'})
links = client.parseDOM(player2, 'iframe', ret='src')
player_nav = client.parseDOM(result, 'div', attrs={'class': 'player_nav'})
transl_type = client.parseDOM(player_nav, 'a')
result_sources = []
for i in range(0, len(links)):
url = links[i]
            if self.url_not_on_list(url, sources):
lang, info = self.get_lang_by_type(transl_type[i])
host = url.split("//")[-1].split("/")[0]
result_sources.append({'source': host, 'quality': q, 'language': lang, 'url': url, 'info': info, 'direct': False, 'debridonly': False})
return result_sources
def url_not_on_list(self, url, sources):
for el in sources:
if el.get('url') == url:
return False
return True
def get_links_from_box(self, result):
sources = []
for row in result:
src_url = client.parseDOM(row, 'a', ret='href')[0]
lang_type = client.parseDOM(row, 'span', attrs={'class': 'c'})[0]
quality_type = client.parseDOM(row, 'span', attrs={'class': 'd'})[0]
host = client.parseDOM(row, 'img', ret='alt')[0]
lang, info = self.get_lang_by_type(lang_type)
q = 'SD'
            if quality_type == 'Wysoka':
                q = 'HD'
sources.append({'source': host, 'quality': q, 'language': lang, 'url': src_url, 'info': info, 'direct': False, 'debridonly': False})
return sources
def get_lang_by_type(self, lang_type):
if lang_type == 'Lektor PL':
return 'pl', 'Lektor'
if lang_type == 'Dubbing PL':
return 'pl', 'Dubbing'
if lang_type == 'Napisy PL':
return 'pl', 'Napisy'
if lang_type == 'PL':
return 'pl', None
return 'en', None
def resolve(self, url):
return url
| apache-2.0
sirchia/CouchPotatoServer | couchpotato/core/providers/nzb/moovee/main.py | 2 | 2468 |
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from dateutil.parser import parse
import re
import time
log = CPLog(__name__)
class Moovee(NZBProvider):
urls = {
        'download': 'http://85.214.105.230/get_nzb.php?id=%s&section=moovee',
'search': 'http://abmoovee.allfilled.com/search.php?q=%s&Search=Search',
}
regex = '<td class="cell_reqid">(?P<reqid>.*?)</td>.+?<td class="cell_request">(?P<title>.*?)</td>.+?<td class="cell_statuschange">(?P<age>.*?)</td>'
http_time_between_calls = 2 # Seconds
def search(self, movie, quality):
results = []
if self.isDisabled() or not self.isAvailable(self.urls['search']) or quality.get('hd', False):
return results
q = '%s %s' % (movie['library']['titles'][0]['title'], quality.get('identifier'))
url = self.urls['search'] % tryUrlencode(q)
cache_key = 'moovee.%s' % q
data = self.getCache(cache_key, url)
if data:
match = re.compile(self.regex, re.DOTALL).finditer(data)
for nzb in match:
new = {
'id': nzb.group('reqid'),
'name': nzb.group('title'),
'type': 'nzb',
'provider': self.getName(),
'age': self.calculateAge(time.mktime(parse(nzb.group('age')).timetuple())),
'size': None,
'url': self.urls['download'] % (nzb.group('reqid')),
'detail_url': '',
'description': '',
'check_nzb': False,
}
new['score'] = fireEvent('score.calculate', new, movie, single = True)
is_correct_movie = fireEvent('searcher.correct_movie',
nzb = new, movie = movie, quality = quality,
imdb_results = False, single_category = False, single = True)
if is_correct_movie:
results.append(new)
self.found(new)
return results
def belongsTo(self, url, host = None):
        match = re.match('http://85\.214\.105\.230/get_nzb\.php\?id=[0-9]*&section=moovee', url)
if match:
return self
return
| gpl-3.0
aisipos/django | tests/postgres_tests/test_ranges.py | 15 | 25276 |
import datetime
import json
from django import forms
from django.core import exceptions, serializers
from django.db.models import F
from django.test import override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import RangeLookupsModel, RangesModel
try:
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django.contrib.postgres import fields as pg_fields, forms as pg_forms
from django.contrib.postgres.validators import (
RangeMaxValueValidator, RangeMinValueValidator,
)
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_all_fields(self):
now = timezone.now()
instance = RangesModel(
ints=NumericRange(0, 10),
bigints=NumericRange(10, 20),
floats=NumericRange(20, 30),
timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now),
dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()),
)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(instance.ints, loaded.ints)
self.assertEqual(instance.bigints, loaded.bigints)
self.assertEqual(instance.floats, loaded.floats)
self.assertEqual(instance.timestamps, loaded.timestamps)
self.assertEqual(instance.dates, loaded.dates)
def test_range_object(self):
r = NumericRange(0, 10)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_tuple(self):
instance = RangesModel(ints=(0, 10))
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(NumericRange(0, 10), loaded.ints)
def test_range_object_boundaries(self):
r = NumericRange(0, 10, '[]')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
self.assertTrue(10 in loaded.floats)
def test_unbounded(self):
r = NumericRange(None, None, '()')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
def test_empty(self):
r = NumericRange(empty=True)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_null(self):
instance = RangesModel(ints=None)
instance.save()
loaded = RangesModel.objects.get()
self.assertIsNone(loaded.ints)
def test_model_set_on_base_field(self):
instance = RangesModel()
field = instance._meta.get_field('ints')
self.assertEqual(field.model, RangesModel)
self.assertEqual(field.base_field.model, RangesModel)
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
RangesModel.objects.create(ints=NumericRange(0, 10)),
RangesModel.objects.create(ints=NumericRange(5, 15)),
RangesModel.objects.create(ints=NumericRange(None, 0)),
RangesModel.objects.create(ints=NumericRange(empty=True)),
RangesModel.objects.create(ints=None),
]
def test_exact(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__exact=NumericRange(0, 10)),
[self.objs[0]],
)
def test_isnull(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isnull=True),
[self.objs[4]],
)
def test_isempty(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isempty=True),
[self.objs[3]],
)
def test_contains(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=8),
[self.objs[0], self.objs[1]],
)
def test_contains_range(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=NumericRange(3, 8)),
[self.objs[0]],
)
def test_contained_by(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)),
[self.objs[0], self.objs[1], self.objs[3]],
)
def test_overlap(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)),
[self.objs[0], self.objs[1]],
)
def test_fully_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)),
[self.objs[2]],
)
def test_fully_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
[],
)
def test_not_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
[self.objs[1]],
)
def test_not_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
[self.objs[0], self.objs[2]],
)
def test_adjacent_to(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
[self.objs[1], self.objs[2]],
)
def test_startswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith=0),
[self.objs[0]],
)
def test_endswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__endswith=0),
[self.objs[2]],
)
def test_startswith_chaining(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith__gte=0),
[self.objs[0], self.objs[1]],
)
class TestQueryingWithRanges(PostgreSQLTestCase):
def test_date_range(self):
objs = [
RangeLookupsModel.objects.create(date='2015-01-01'),
RangeLookupsModel.objects.create(date='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_date_range_datetime_field(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01'),
RangeLookupsModel.objects.create(timestamp='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_datetime_range(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'),
RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55')
),
[objs[0]],
)
def test_integer_range(self):
objs = [
RangeLookupsModel.objects.create(integer=5),
RangeLookupsModel.objects.create(integer=99),
RangeLookupsModel.objects.create(integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_biginteger_range(self):
objs = [
RangeLookupsModel.objects.create(big_integer=5),
RangeLookupsModel.objects.create(big_integer=99),
RangeLookupsModel.objects.create(big_integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_float_range(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_f_ranges(self):
parent = RangesModel.objects.create(floats=NumericRange(0, 10))
objs = [
RangeLookupsModel.objects.create(float=5, parent=parent),
RangeLookupsModel.objects.create(float=99, parent=parent),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=F('parent__floats')),
[objs[0]]
)
def test_exclude(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)),
[objs[2]]
)
class TestSerialization(PostgreSQLTestCase):
test_data = (
'[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", '
'\\"bounds\\": \\"[)\\"}", "floats": "{\\"empty\\": true}", '
'"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", '
'\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", '
'"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}" }, '
'"model": "postgres_tests.rangesmodel", "pk": null}]'
)
lower_date = datetime.date(2014, 1, 1)
upper_date = datetime.date(2014, 2, 2)
lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc)
def test_dumping(self):
instance = RangesModel(
ints=NumericRange(0, 10), floats=NumericRange(empty=True),
timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt),
dates=DateRange(self.lower_date, self.upper_date),
)
data = serializers.serialize('json', [instance])
dumped = json.loads(data)
for field in ('ints', 'dates', 'timestamps'):
dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field])
check = json.loads(self.test_data)
for field in ('ints', 'dates', 'timestamps'):
check[0]['fields'][field] = json.loads(check[0]['fields'][field])
self.assertEqual(dumped, check)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.ints, NumericRange(0, 10))
self.assertEqual(instance.floats, NumericRange(empty=True))
self.assertEqual(instance.bigints, None)
self.assertEqual(instance.dates, DateRange(self.lower_date, self.upper_date))
self.assertEqual(instance.timestamps, DateTimeTZRange(self.lower_dt, self.upper_dt))
def test_serialize_range_with_null(self):
instance = RangesModel(ints=NumericRange(None, 10))
data = serializers.serialize('json', [instance])
new_instance = list(serializers.deserialize('json', data))[0].object
self.assertEqual(new_instance.ints, NumericRange(None, 10))
instance = RangesModel(ints=NumericRange(10, None))
data = serializers.serialize('json', [instance])
new_instance = list(serializers.deserialize('json', data))[0].object
self.assertEqual(new_instance.ints, NumericRange(10, None))
class TestValidators(PostgreSQLTestCase):
def test_max(self):
validator = RangeMaxValueValidator(5)
validator(NumericRange(0, 5))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely less than or equal to 5.')
self.assertEqual(cm.exception.code, 'max_value')
def test_min(self):
validator = RangeMinValueValidator(5)
validator(NumericRange(10, 15))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely greater than or equal to 5.')
self.assertEqual(cm.exception.code, 'min_value')
class TestFormField(PostgreSQLTestCase):
def test_valid_integer(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['1', '2'])
self.assertEqual(value, NumericRange(1, 2))
def test_valid_floats(self):
field = pg_forms.FloatRangeField()
value = field.clean(['1.12345', '2.001'])
self.assertEqual(value, NumericRange(1.12345, 2.001))
def test_valid_timestamps(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12'])
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(value, DateTimeTZRange(lower, upper))
def test_valid_dates(self):
field = pg_forms.DateRangeField()
value = field.clean(['01/01/2014', '02/02/2014'])
lower = datetime.date(2014, 1, 1)
upper = datetime.date(2014, 2, 2)
self.assertEqual(value, DateRange(lower, upper))
def test_using_split_datetime_widget(self):
class SplitDateTimeRangeField(pg_forms.DateTimeRangeField):
base_field = forms.SplitDateTimeField
class SplitForm(forms.Form):
field = SplitDateTimeRangeField()
form = SplitForm()
self.assertHTMLEqual(str(form), '''
<tr>
<th>
<label for="id_field_0">Field:</label>
</th>
<td>
<input id="id_field_0_0" name="field_0_0" type="text" />
<input id="id_field_0_1" name="field_0_1" type="text" />
<input id="id_field_1_0" name="field_1_0" type="text" />
<input id="id_field_1_1" name="field_1_1" type="text" />
</td>
</tr>
''')
form = SplitForm({
'field_0_0': '01/01/2014',
'field_0_1': '00:00:00',
'field_1_0': '02/02/2014',
'field_1_1': '12:12:12',
})
self.assertTrue(form.is_valid())
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper))
def test_none(self):
field = pg_forms.IntegerRangeField(required=False)
value = field.clean(['', ''])
self.assertEqual(value, None)
def test_rendering(self):
class RangeForm(forms.Form):
ints = pg_forms.IntegerRangeField()
self.assertHTMLEqual(str(RangeForm()), '''
<tr>
<th><label for="id_ints_0">Ints:</label></th>
<td>
<input id="id_ints_0" name="ints_0" type="number" />
<input id="id_ints_1" name="ints_1" type="number" />
</td>
</tr>
''')
def test_integer_lower_bound_higher(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['10', '2'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_integer_open(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['', '0'])
self.assertEqual(value, NumericRange(None, 0))
def test_integer_incorrect_data_type(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_integer_invalid_lower(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_invalid_upper(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_required(self):
field = pg_forms.IntegerRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean([1, ''])
self.assertEqual(value, NumericRange(1, None))
def test_float_lower_bound_higher(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.8', '1.6'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_float_open(self):
field = pg_forms.FloatRangeField()
value = field.clean(['', '3.1415926'])
self.assertEqual(value, NumericRange(None, 3.1415926))
def test_float_incorrect_data_type(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1.6')
self.assertEqual(cm.exception.messages[0], 'Enter two numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_float_invalid_lower(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '3.1415926'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_invalid_upper(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.61803399', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_required(self):
field = pg_forms.FloatRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1.61803399', ''])
self.assertEqual(value, NumericRange(1.61803399, None))
def test_date_lower_bound_higher(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', '1976-04-16'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_date_open(self):
field = pg_forms.DateRangeField()
value = field.clean(['', '2013-04-09'])
self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9)))
def test_date_incorrect_data_type(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.')
self.assertEqual(cm.exception.code, 'invalid')
def test_date_invalid_lower(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2013-04-09'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_invalid_upper(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_required(self):
field = pg_forms.DateRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1976-04-16', ''])
self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None))
def test_datetime_lower_bound_higher(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2006-10-25 14:59', '2006-10-25 14:58'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_datetime_open(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['', '2013-04-09 11:45'])
self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45)))
def test_datetime_incorrect_data_type(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('2013-04-09 11:45')
self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.')
self.assertEqual(cm.exception.code, 'invalid')
def test_datetime_invalid_lower(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['45', '2013-04-09 11:45'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_invalid_upper(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09 11:45', 'sweet pickles'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_required(self):
field = pg_forms.DateTimeRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['2013-04-09 11:45', ''])
self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None))
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg')
def test_datetime_prepare_value(self):
field = pg_forms.DateTimeRangeField()
value = field.prepare_value(
DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None)
)
self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None])
def test_model_field_formfield_integer(self):
model_field = pg_fields.IntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_biginteger(self):
model_field = pg_fields.BigIntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_float(self):
model_field = pg_fields.FloatRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.FloatRangeField)
def test_model_field_formfield_date(self):
model_field = pg_fields.DateRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateRangeField)
def test_model_field_formfield_datetime(self):
model_field = pg_fields.DateTimeRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateTimeRangeField)
class TestWidget(PostgreSQLTestCase):
def test_range_widget(self):
f = pg_forms.ranges.DateTimeRangeField()
self.assertHTMLEqual(
f.widget.render('datetimerange', ''),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
self.assertHTMLEqual(
f.widget.render('datetimerange', None),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
dt_range = DateTimeTZRange(
datetime.datetime(2006, 1, 10, 7, 30),
datetime.datetime(2006, 2, 12, 9, 50)
)
self.assertHTMLEqual(
f.widget.render('datetimerange', dt_range),
'<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00" />'
'<input type="text" name="datetimerange_1" value="2006-02-12 09:50:00" />'
)
| bsd-3-clause
titanxxh/xengt-ha-kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 |
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
        (r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
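# Example (illustrative) of embedding this frame from a perf trace script. The
# sched_tracer object is assumed to provide interval(), nr_rectangles(),
# fill_zone() and the mouse/keyboard callbacks used above.
#
#     app = wx.App(False)
#     RootFrame(sched_tracer, "Scheduler traces")
#     app.MainLoop()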
| gpl-2.0
wd5/jangr | django/contrib/admin/actions.py | 160 | 3285 |
"""
Built-in, globally-available admin actions.
"""
from django import template
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.shortcuts import render_to_response
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects or, if the user lacks permission for one of the related
    children (foreign keys), a "permission denied" message.
    Next, it deletes all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_unicode(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_unicode(opts.verbose_name)
else:
objects_name = force_unicode(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"root_path": modeladmin.admin_site.root_path,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return render_to_response(modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context, context_instance=template.RequestContext(request))
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
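# delete_selected is registered as a global admin action by django.contrib.admin,
# so it appears in every change list's action menu. A ModelAdmin can opt out
# (illustrative sketch for this Django era):
#
#     class MyModelAdmin(admin.ModelAdmin):
#         def get_actions(self, request):
#             actions = super(MyModelAdmin, self).get_actions(request)
#             if 'delete_selected' in actions:
#                 del actions['delete_selected']
#             return actions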
| bsd-3-clause
arborh/tensorflow | tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py | 5 | 12858 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("b/123903858: Add eager and V2 test coverage")
class MapDefunTest(test_base.DatasetTestBase):
def testNoIntraOpLimit(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(
simple_fn, [elems], [dtypes.int32], [(2,)],
max_intra_op_parallelism=0)[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunSimple(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(2,)])[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunMismatchedTypes(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return math_ops.cast(x, dtypes.float64)
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
def testMapDefunReduceDim(self):
# Tests where the output has a different rank from the input
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return array_ops.gather(x, 0)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
expected = constant_op.constant([1, 3, 5])
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunMultipleOutputs(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return (x, math_ops.cast(x * 2 + 3, dtypes.float64))
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32, dtypes.float64], [(2,),
(2,)])
expected = [elems, elems * 2 + 3]
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])[0]
self.assertEqual(result.get_shape(), (3, 2))
def testMapDefunPartialShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
elems = array_ops.placeholder(dtypes.int64, (None, 2))
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])
self.assertEqual(result[0].get_shape().as_list(), [None, 2])
def testMapDefunRaisesErrorOnRuntimeShapeMismatch(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec(None, dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32)
])
def fn(x, y):
return x, y
elems1 = array_ops.placeholder(dtypes.int32)
elems2 = array_ops.placeholder(dtypes.int32)
result = map_defun.map_defun(fn, [elems1, elems2],
[dtypes.int32, dtypes.int32], [(), ()])
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"All inputs must have the same dimension 0."):
sess.run(result, feed_dict={elems1: [1, 2, 3, 4, 5], elems2: [1, 2, 3]})
def testMapDefunRaisesDefunError(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return array_ops.identity(x)
elems = constant_op.constant([0, 0, 0, 37, 0])
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(result)
def testMapDefunCancelledCorrectly(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([5], dtypes.int64)])
def defun(x):
      # x has leading dimension 5, so gathering index 10 raises an error
return array_ops.gather(x, 10)
c = array_ops.tile(
array_ops.expand_dims(
constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0),
[100, 1])
map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0]
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
self.evaluate(map_defun_op)
def testMapDefunWithUnspecifiedOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
res = x * 2 + 3
return (res, res + 1, res + 2)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems],
[dtypes.int32, dtypes.int32, dtypes.int32],
[None, (None,), (2,)])
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r[0]), self.evaluate(expected))
self.assertAllEqual(self.evaluate(r[1]), self.evaluate(expected + 1))
self.assertAllEqual(self.evaluate(r[2]), self.evaluate(expected + 2))
def testMapDefunWithDifferentOutputShapeEachRun(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
elems = array_ops.placeholder(dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [None])[0]
with session.Session() as sess:
self.assertAllEqual(sess.run(r, feed_dict={elems: [0]}), [3])
self.assertAllEqual(
sess.run(r, feed_dict={elems: [[0], [1]]}), [[3], [5]])
def testMapDefunWithWrongOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(1,)])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
def testMapDefunWithInvalidInput(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2
c = constant_op.constant(2)
with self.assertRaises(ValueError):
# Fails at graph construction time for inputs with known shapes.
r = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [None])[0]
p = array_ops.placeholder(dtypes.int32)
r = map_defun.map_defun(simple_fn, [p], [dtypes.int32], [None])[0]
with session.Session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(r, feed_dict={p: 0})
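  # Note: map_defun maps over dimension 0, so every input must have rank >= 1;
  # a scalar input fails at graph construction time when its shape is known
  # statically, or at run time when fed through a placeholder, as the test
  # above exercises both paths.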
def testMapDefunWithParentCancellation(self):
# Checks that a cancellation of the parent graph is threaded through to
# MapDefunOp correctly.
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def simple_fn(x):
del x
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
# Blocking
return queue.dequeue_many(5)
c = constant_op.constant([1, 2, 3, 4, 5])
map_defun_op = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [()])[0]
with self.cached_session() as sess:
thread = self.checkedThread(
self.assert_op_cancelled, args=(map_defun_op,))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
def testMapDefunWithCapturedInputs(self):
c = constant_op.constant(2)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return x + c
x = constant_op.constant([1, 2, 3, 4])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.int32], [()])[0]
expected = x + c
self.assertAllEqual(self.evaluate(expected), self.evaluate(map_defun_op))
def testMapDefunWithVariantTensor(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.variant)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.variant],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
def testMapDefunWithVariantTensorAsCaptured(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
del x
return serialized
x = constant_op.constant([0, 0])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.variant], [None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
def testMapDefunWithStrTensor(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.string)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.string],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertValuesEqual(expected, actual)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
aimejeux/enigma2
|
lib/python/Screens/ParentalControlSetup.py
|
4
|
14940
|
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import NumberActionMap
from Components.config import config, getConfigListEntry, ConfigNothing, NoSave, ConfigPIN
from Components.ParentalControlList import ParentalControlEntryComponent, ParentalControlList
from Components.Sources.StaticText import StaticText
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.InputBox import PinInput
from Screens.ChannelSelection import service_types_tv
from Tools.BoundFunction import boundFunction
from enigma import eServiceCenter, eTimer, eServiceReference
from operator import itemgetter
class ProtectedScreen:
def __init__(self):
if self.isProtected():
self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.pinEntered, PinInput, pinList = [self.protectedWithPin()], triesEntry = self.getTriesEntry(), title = self.getPinText(), windowTitle = _("Enter pin code")))
def getTriesEntry(self):
return config.ParentalControl.retries.setuppin
def getPinText(self):
return _("Please enter the correct pin code")
def isProtected(self):
return True
def protectedWithPin(self):
return config.ParentalControl.setuppin.getValue()
def pinEntered(self, result):
if result is None:
self.close()
elif not result:
self.session.openWithCallback(self.close, MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR)
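# ProtectedScreen is a mix-in: a screen inherits from it and overrides
# isProtected() and protectedWithPin(). When isProtected() returns True, a
# PIN prompt is scheduled before the screen is first shown, and a wrong
# entry shows an error and closes the screen again via pinEntered().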
class ParentalControlSetup(Screen, ConfigListScreen, ProtectedScreen):
def __init__(self, session):
Screen.__init__(self, session)
ProtectedScreen.__init__(self)
		# for the skin: first try ParentalControlSetup, then Setup; this allows individual skinning
self.skinName = ["ParentalControlSetup", "Setup" ]
self.setup_title = _("Parental control setup")
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self.createSetup()
self["actions"] = NumberActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.keyCancel,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def isProtected(self):
return config.ParentalControl.setuppinactive.getValue() and config.ParentalControl.configured.getValue()
def createSetup(self):
self.editListEntry = None
self.changePin = None
self.changeSetupPin = None
self.list = []
self.list.append(getConfigListEntry(_("Enable parental control"), config.ParentalControl.configured))
print "config.ParentalControl.configured.getValue()", config.ParentalControl.configured.getValue()
self.editBouquetListEntry = -1
self.reloadLists = -1
if config.ParentalControl.configured.getValue():
#self.list.append(getConfigListEntry(_("Configuration mode"), config.ParentalControl.mode))
self.list.append(getConfigListEntry(_("Protect setup"), config.ParentalControl.setuppinactive))
if config.ParentalControl.setuppinactive.getValue():
self.changeSetupPin = getConfigListEntry(_("Change setup PIN"), NoSave(ConfigNothing()))
self.list.append(self.changeSetupPin)
self.list.append(getConfigListEntry(_("Protect services"), config.ParentalControl.servicepinactive))
if config.ParentalControl.servicepinactive.getValue():
self.list.append(getConfigListEntry(_("Parental control type"), config.ParentalControl.type))
if config.ParentalControl.mode.getValue() == "complex":
self.changePin = getConfigListEntry(_("Change service PINs"), NoSave(ConfigNothing()))
self.list.append(self.changePin)
elif config.ParentalControl.mode.getValue() == "simple":
self.changePin = getConfigListEntry(_("Change service PIN"), NoSave(ConfigNothing()))
self.list.append(self.changePin)
#Added Option to remember the service pin
self.list.append(getConfigListEntry(_("Remember service PIN"), config.ParentalControl.storeservicepin))
self.editListEntry = getConfigListEntry(_("Edit services list"), NoSave(ConfigNothing()))
self.list.append(self.editListEntry)
			#New function: possibility to add bouquets to the whitelist / blacklist
self.editBouquetListEntry = getConfigListEntry(_("Edit bouquets list"), NoSave(ConfigNothing()))
self.list.append(self.editBouquetListEntry)
#New option to reload service lists (for example if bouquets have changed)
self.reloadLists = getConfigListEntry(_("Reload black-/white lists"), NoSave(ConfigNothing()))
self.list.append(self.reloadLists)
self["config"].list = self.list
self["config"].setList(self.list)
def keyOK(self):
print "self[\"config\"].l.getCurrentSelection()", self["config"].l.getCurrentSelection()
if self["config"].l.getCurrentSelection() == self.editListEntry:
self.session.open(ParentalControlEditor)
elif self["config"].l.getCurrentSelection() == self.editBouquetListEntry:
self.session.open(ParentalControlBouquetEditor)
elif self["config"].l.getCurrentSelection() == self.changePin:
if config.ParentalControl.mode.getValue() == "complex":
pass
else:
self.session.open(ParentalControlChangePin, config.ParentalControl.servicepin[0], _("service PIN"))
elif self["config"].l.getCurrentSelection() == self.changeSetupPin:
self.session.open(ParentalControlChangePin, config.ParentalControl.setuppin, _("setup PIN"))
elif self["config"].l.getCurrentSelection() == self.reloadLists:
from Components.ParentalControl import parentalControl
parentalControl.open()
else:
ConfigListScreen.keyRight(self)
print "current selection:", self["config"].l.getCurrentSelection()
self.createSetup()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
print "current selection:", self["config"].l.getCurrentSelection()
self.createSetup()
def keyRight(self):
ConfigListScreen.keyRight(self)
print "current selection:", self["config"].l.getCurrentSelection()
self.createSetup()
def SetupPinMessageCallback(self, value):
if value:
self.session.openWithCallback(self.cancelCB, ParentalControlChangePin, config.ParentalControl.setuppin, _("setup PIN"))
else:
config.ParentalControl.setuppinactive.setValue(False)
self.keyCancel()
def ServicePinMessageCallback(self, value):
if value:
self.session.openWithCallback(self.cancelCB, ParentalControlChangePin, config.ParentalControl.servicepin[0], _("service PIN"))
else:
config.ParentalControl.servicepinactive.setValue(False)
self.keyCancel()
def cancelCB(self,value):
self.keyCancel()
def keyCancel(self):
if config.ParentalControl.setuppinactive.getValue() and config.ParentalControl.setuppin.getValue() == 'aaaa':
			self.session.openWithCallback(self.SetupPinMessageCallback, MessageBox, _("No valid setup PIN found!\nWould you like to change the setup PIN now?\nIf you select 'No' here, setup protection will stay disabled!"), MessageBox.TYPE_YESNO)
elif config.ParentalControl.servicepinactive.getValue() and config.ParentalControl.servicepin[0].getValue() == 'aaaa':
			self.session.openWithCallback(self.ServicePinMessageCallback, MessageBox, _("No valid service PIN found!\nWould you like to change the service PIN now?\nIf you select 'No' here, service protection will stay disabled!"), MessageBox.TYPE_YESNO)
else:
for x in self["config"].list:
x[1].save()
self.close()
def keyNumberGlobal(self, number):
pass
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
SPECIAL_CHAR = 96
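# chr(96) is the backquote; readServiceList() below files every service
# whose name does not start with a-z under this key.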
class ParentalControlEditor(Screen):
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Parental control editor"))
self.list = []
self.servicelist = ParentalControlList(self.list)
self["servicelist"] = self.servicelist
#self.onShown.append(self.chooseLetter)
self.currentLetter = chr(SPECIAL_CHAR)
self.readServiceList()
self.chooseLetterTimer = eTimer()
self.chooseLetterTimer.callback.append(self.chooseLetter)
self.onLayoutFinish.append(self.LayoutFinished)
self["actions"] = NumberActionMap(["DirectionActions", "ColorActions", "OkCancelActions", "NumberActions"],
{
"ok": self.select,
"cancel": self.cancel,
#"left": self.keyLeft,
#"right": self.keyRight,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
def LayoutFinished(self):
self.chooseLetterTimer.start(0, True)
def cancel(self):
self.chooseLetter()
def select(self):
self.servicelist.toggleSelectedLock()
def keyNumberGlobal(self, number):
pass
def readServiceList(self):
serviceHandler = eServiceCenter.getInstance()
refstr = '%s ORDER BY name' % service_types_tv
self.root = eServiceReference(refstr)
self.servicesList = {}
list = serviceHandler.list(self.root)
if list is not None:
services = list.getContent("CN", True) #(servicecomparestring, name)
for s in services:
key = s[1].lower()[0]
if key < 'a' or key > 'z':
key = chr(SPECIAL_CHAR)
#key = str(key)
if not self.servicesList.has_key(key):
self.servicesList[key] = []
self.servicesList[key].append(s)
def chooseLetter(self):
print "choose letter"
mylist = []
for x in self.servicesList.keys():
if x == chr(SPECIAL_CHAR):
x = (_("special characters"), x)
else:
x = (x, x)
mylist.append(x)
mylist.sort(key=itemgetter(1))
sel = ord(self.currentLetter) - SPECIAL_CHAR
self.session.openWithCallback(self.letterChosen, ChoiceBox, title=_("Show services beginning with"), list=mylist, keys = [], selection = sel)
def letterChosen(self, result):
from Components.ParentalControl import parentalControl
if result is not None:
print "result:", result
self.currentLetter = result[1]
			#getProtectionLevel was replaced by the new getProtectionType
self.list = [ParentalControlEntryComponent(x[0], x[1], parentalControl.getProtectionType(x[0])) for x in self.servicesList[result[1]]]
self.servicelist.setList(self.list)
else:
parentalControl.save()
self.close()
class ParentalControlBouquetEditor(Screen):
#This new class allows adding complete bouquets to black- and whitelists
	#The service reference stored for a bouquet is its refstr as listed in bouquets.tv
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = "ParentalControlEditor"
self.list = []
self.bouquetslist = ParentalControlList(self.list)
self["servicelist"] = self.bouquetslist
self.readBouquetList()
self.onLayoutFinish.append(self.selectBouquet)
self["actions"] = NumberActionMap(["DirectionActions", "ColorActions", "OkCancelActions"],
{
"ok": self.select,
"cancel": self.cancel
}, -1)
def cancel(self):
from Components.ParentalControl import parentalControl
parentalControl.save()
self.close()
def select(self):
self.bouquetslist.toggleSelectedLock()
def readBouquetList(self):
serviceHandler = eServiceCenter.getInstance()
refstr = '1:134:1:0:0:0:0:0:0:0:FROM BOUQUET \"bouquets.tv\" ORDER BY bouquet'
bouquetroot = eServiceReference(refstr)
self.bouquetlist = {}
list = serviceHandler.list(bouquetroot)
if list is not None:
self.bouquetlist = list.getContent("CN", True)
def selectBouquet(self):
from Components.ParentalControl import parentalControl
self.list = [ParentalControlEntryComponent(x[0], x[1], parentalControl.getProtectionType(x[0])) for x in self.bouquetlist]
self.bouquetslist.setList(self.list)
class ParentalControlChangePin(Screen, ConfigListScreen, ProtectedScreen):
def __init__(self, session, pin, pinname):
Screen.__init__(self, session)
		# for the skin: first try ParentalControlChangePin, then Setup; this allows individual skinning
self.skinName = ["ParentalControlChangePin", "Setup" ]
self.setup_title = _("Change pin code")
self.onChangedEntry = [ ]
self.pin = pin
self.list = []
self.pin1 = ConfigPIN(default = 1111, censor = "*")
self.pin2 = ConfigPIN(default = 1112, censor = "*")
self.pin1.addEndNotifier(boundFunction(self.valueChanged, 1))
self.pin2.addEndNotifier(boundFunction(self.valueChanged, 2))
self.list.append(getConfigListEntry(_("New PIN"), NoSave(self.pin1)))
self.list.append(getConfigListEntry(_("Reenter new PIN"), NoSave(self.pin2)))
ConfigListScreen.__init__(self, self.list)
# print "old pin:", pin
#if pin.value != "aaaa":
#self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.pinEntered, PinInput, pinList = [self.pin.value], title = _("please enter the old pin"), windowTitle = _("Change pin code")))
ProtectedScreen.__init__(self)
self["actions"] = NumberActionMap(["DirectionActions", "ColorActions", "OkCancelActions", "MenuActions"],
{
"cancel": self.keyCancel,
"red": self.keyCancel,
"save": self.keyOK,
"menu": self.closeRecursive,
}, -1)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def valueChanged(self, pin, value):
if pin == 1:
self["config"].setCurrentIndex(1)
elif pin == 2:
self.keyOK()
def getPinText(self):
return _("Please enter the old PIN code")
def isProtected(self):
return self.pin.getValue() != "aaaa"
def protectedWithPin(self):
return self.pin.getValue()
# def pinEntered(self, result):
#if result[0] is None:
#self.close()
#if not result[0]:
#print result, "-", self.pin.value
#self.session.openWithCallback(self.close, MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR)
def keyOK(self):
if self.pin1.getValue() == self.pin2.getValue():
self.pin.setValue(self.pin1.getValue())
self.pin.save()
self.session.openWithCallback(self.close, MessageBox, _("The PIN code has been changed successfully."), MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("The PIN codes you entered are different."), MessageBox.TYPE_ERROR)
def keyNumberGlobal(self, number):
ConfigListScreen.keyNumberGlobal(self, number)
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
|
gpl-2.0
|
apanju/odoo
|
addons/purchase_requisition/wizard/purchase_requisition_partner.py
|
373
|
2320
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_requisition_partner(osv.osv_memory):
_name = "purchase.requisition.partner"
_description = "Purchase Requisition Partner"
_columns = {
        'partner_id': fields.many2one('res.partner', 'Supplier', required=True, domain=[('supplier', '=', True)]),
}
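    # This transient model backs the supplier-selection wizard: view_init()
    # refuses to open when the requisition has no lines, and create_order()
    # turns the requisition into a purchase order for the chosen supplier
    # via make_purchase_order().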
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(purchase_requisition_partner, self).view_init(cr, uid, fields_list, context=context)
record_id = context and context.get('active_id', False) or False
tender = self.pool.get('purchase.requisition').browse(cr, uid, record_id, context=context)
if not tender.line_ids:
raise osv.except_osv(_('Error!'), _('Define product(s) you want to include in the call for bids.'))
return res
def create_order(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.requisition').make_purchase_order(cr, uid, active_ids, data.partner_id.id, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/unittest/test/test_assertions.py
|
4
|
17021
|
import datetime
import warnings
import weakref
import unittest
from itertools import product
class Test_Assertions(unittest.TestCase):
def test_AlmostEqual(self):
self.assertAlmostEqual(1.00000001, 1.0)
self.assertNotAlmostEqual(1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.0000001, 1.0)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 1.00000001, 1.0)
self.assertAlmostEqual(1.1, 1.0, places=0)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 1.1, 1.0, places=1)
self.assertAlmostEqual(0, .1+.1j, places=0)
self.assertNotAlmostEqual(0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertAlmostEqual, 0, .1+.1j, places=1)
self.assertRaises(self.failureException,
self.assertNotAlmostEqual, 0, .1+.1j, places=0)
self.assertAlmostEqual(float('inf'), float('inf'))
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
float('inf'), float('inf'))
    def test_AlmostEqualWithDelta(self):
self.assertAlmostEqual(1.1, 1.0, delta=0.5)
self.assertAlmostEqual(1.0, 1.1, delta=0.5)
self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)
self.assertAlmostEqual(1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.0, 1.0, delta=0.5)
self.assertRaises(self.failureException, self.assertAlmostEqual,
1.1, 1.0, delta=0.05)
self.assertRaises(self.failureException, self.assertNotAlmostEqual,
1.1, 1.0, delta=0.5)
self.assertRaises(TypeError, self.assertAlmostEqual,
1.1, 1.0, places=2, delta=2)
self.assertRaises(TypeError, self.assertNotAlmostEqual,
1.1, 1.0, places=2, delta=2)
first = datetime.datetime.now()
second = first + datetime.timedelta(seconds=10)
self.assertAlmostEqual(first, second,
delta=datetime.timedelta(seconds=20))
self.assertNotAlmostEqual(first, second,
delta=datetime.timedelta(seconds=5))
def test_assertRaises(self):
def _raise(e):
raise e
self.assertRaises(KeyError, _raise, KeyError)
self.assertRaises(KeyError, _raise, KeyError("key"))
try:
self.assertRaises(KeyError, lambda: None)
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
self.assertRaises(KeyError, _raise, ValueError)
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
with self.assertRaises(KeyError) as cm:
try:
raise KeyError
except Exception as e:
exc = e
raise
self.assertIs(cm.exception, exc)
with self.assertRaises(KeyError):
raise KeyError("key")
try:
with self.assertRaises(KeyError):
pass
except self.failureException as e:
self.assertIn("KeyError not raised", str(e))
else:
self.fail("assertRaises() didn't fail")
try:
with self.assertRaises(KeyError):
raise ValueError
except ValueError:
pass
else:
self.fail("assertRaises() didn't let exception pass through")
def test_assertRaises_frames_survival(self):
# Issue #9815: assertRaises should avoid keeping local variables
# in a traceback alive.
class A:
pass
wr = None
class Foo(unittest.TestCase):
def foo(self):
nonlocal wr
a = A()
wr = weakref.ref(a)
try:
raise IOError
except IOError:
raise ValueError
def test_functional(self):
self.assertRaises(ValueError, self.foo)
def test_with(self):
with self.assertRaises(ValueError):
self.foo()
Foo("test_functional").run()
self.assertIsNone(wr())
Foo("test_with").run()
self.assertIsNone(wr())
def testAssertNotRegex(self):
self.assertNotRegex('Ala ma kota', r'r+')
try:
self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
except self.failureException as e:
self.assertIn('Message', e.args[0])
else:
self.fail('assertNotRegex should have failed.')
class TestLongMessage(unittest.TestCase):
"""Test that the individual asserts honour longMessage.
This actually tests all the message behaviour for
asserts that use longMessage."""
def setUp(self):
class TestableTestFalse(unittest.TestCase):
longMessage = False
failureException = self.failureException
def testTest(self):
pass
class TestableTestTrue(unittest.TestCase):
longMessage = True
failureException = self.failureException
def testTest(self):
pass
self.testableTrue = TestableTestTrue('testTest')
self.testableFalse = TestableTestFalse('testTest')
def testDefault(self):
self.assertTrue(unittest.TestCase.longMessage)
def test_formatMsg(self):
self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")
self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")
# This blows up if _formatMessage uses string concatenation
self.testableTrue._formatMessage(object(), 'foo')
def test_formatMessage_unicode_error(self):
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing msg
self.testableTrue._formatMessage(one, '\uFFFD')
def assertMessages(self, methodName, args, errors):
"""
Check that methodName(*args) raises the correct error messages.
        errors should be a list of 4 regexes that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
def getMethod(i):
useTestableFalse = i < 2
if useTestableFalse:
test = self.testableFalse
else:
test = self.testableTrue
return getattr(test, methodName)
for i, expected_regex in enumerate(errors):
testMethod = getMethod(i)
kwargs = {}
withMsg = i % 2
if withMsg:
kwargs = {"msg": "oops"}
with self.assertRaisesRegex(self.failureException,
expected_regex=expected_regex):
testMethod(*args, **kwargs)
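    # For example, assertTrue(False) is expected to produce, in order:
    # "False is not true" (longMessage=False, no msg), "oops" (the msg
    # replaces the default), "False is not true" (longMessage=True, no msg),
    # and "False is not true : oops" (longMessage=True appends the msg),
    # exactly as the first test below spells out.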
def testAssertTrue(self):
self.assertMessages('assertTrue', (False,),
["^False is not true$", "^oops$", "^False is not true$",
"^False is not true : oops$"])
def testAssertFalse(self):
self.assertMessages('assertFalse', (True,),
["^True is not false$", "^oops$", "^True is not false$",
"^True is not false : oops$"])
def testNotEqual(self):
self.assertMessages('assertNotEqual', (1, 1),
["^1 == 1$", "^oops$", "^1 == 1$",
"^1 == 1 : oops$"])
def testAlmostEqual(self):
self.assertMessages('assertAlmostEqual', (1, 2),
["^1 != 2 within 7 places$", "^oops$",
"^1 != 2 within 7 places$", "^1 != 2 within 7 places : oops$"])
def testNotAlmostEqual(self):
self.assertMessages('assertNotAlmostEqual', (1, 1),
["^1 == 1 within 7 places$", "^oops$",
"^1 == 1 within 7 places$", "^1 == 1 within 7 places : oops$"])
def test_baseAssertEqual(self):
self.assertMessages('_baseAssertEqual', (1, 2),
["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])
def testAssertSequenceEqual(self):
# Error messages are multiline so not testing on full message
# assertTupleEqual and assertListEqual delegate to this method
self.assertMessages('assertSequenceEqual', ([], [None]),
["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
r"\+ \[None\] : oops$"])
def testAssertSetEqual(self):
self.assertMessages('assertSetEqual', (set(), set([None])),
["None$", "^oops$", "None$",
"None : oops$"])
def testAssertIn(self):
self.assertMessages('assertIn', (None, []),
                            [r'^None not found in \[\]$', "^oops$",
                             r'^None not found in \[\]$',
                             r'^None not found in \[\] : oops$'])
def testAssertNotIn(self):
self.assertMessages('assertNotIn', (None, [None]),
                            [r'^None unexpectedly found in \[None\]$', "^oops$",
                             r'^None unexpectedly found in \[None\]$',
                             r'^None unexpectedly found in \[None\] : oops$'])
def testAssertDictEqual(self):
self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
[r"\+ \{'key': 'value'\}$", "^oops$",
"\+ \{'key': 'value'\}$",
"\+ \{'key': 'value'\} : oops$"])
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
["^Missing: 'key'$", "^oops$",
"^Missing: 'key'$",
"^Missing: 'key' : oops$"])
def testAssertMultiLineEqual(self):
self.assertMessages('assertMultiLineEqual', ("", "foo"),
[r"\+ foo$", "^oops$",
r"\+ foo$",
r"\+ foo : oops$"])
def testAssertLess(self):
self.assertMessages('assertLess', (2, 1),
["^2 not less than 1$", "^oops$",
"^2 not less than 1$", "^2 not less than 1 : oops$"])
def testAssertLessEqual(self):
self.assertMessages('assertLessEqual', (2, 1),
["^2 not less than or equal to 1$", "^oops$",
"^2 not less than or equal to 1$",
"^2 not less than or equal to 1 : oops$"])
def testAssertGreater(self):
self.assertMessages('assertGreater', (1, 2),
["^1 not greater than 2$", "^oops$",
"^1 not greater than 2$",
"^1 not greater than 2 : oops$"])
def testAssertGreaterEqual(self):
self.assertMessages('assertGreaterEqual', (1, 2),
["^1 not greater than or equal to 2$", "^oops$",
"^1 not greater than or equal to 2$",
"^1 not greater than or equal to 2 : oops$"])
def testAssertIsNone(self):
self.assertMessages('assertIsNone', ('not None',),
["^'not None' is not None$", "^oops$",
"^'not None' is not None$",
"^'not None' is not None : oops$"])
def testAssertIsNotNone(self):
self.assertMessages('assertIsNotNone', (None,),
["^unexpectedly None$", "^oops$",
"^unexpectedly None$",
"^unexpectedly None : oops$"])
def testAssertIs(self):
self.assertMessages('assertIs', (None, 'foo'),
["^None is not 'foo'$", "^oops$",
"^None is not 'foo'$",
"^None is not 'foo' : oops$"])
def testAssertIsNot(self):
self.assertMessages('assertIsNot', (None, None),
["^unexpectedly identical: None$", "^oops$",
"^unexpectedly identical: None$",
"^unexpectedly identical: None : oops$"])
def testAssertRegex(self):
self.assertMessages('assertRegex', ('foo', 'bar'),
["^Regex didn't match:",
"^oops$",
"^Regex didn't match:",
"^Regex didn't match: (.*) : oops$"])
def testAssertNotRegex(self):
self.assertMessages('assertNotRegex', ('foo', 'foo'),
["^Regex matched:",
"^oops$",
"^Regex matched:",
"^Regex matched: (.*) : oops$"])
def assertMessagesCM(self, methodName, args, func, errors):
"""
Check that the correct error messages are raised while executing:
with method(*args):
func()
        *errors* should be a list of 4 regexes that match the error when:
1) longMessage = False and no msg passed;
2) longMessage = False and msg passed;
3) longMessage = True and no msg passed;
4) longMessage = True and msg passed;
"""
p = product((self.testableFalse, self.testableTrue),
({}, {"msg": "oops"}))
for (cls, kwargs), err in zip(p, errors):
method = getattr(cls, methodName)
with self.assertRaisesRegex(cls.failureException, err):
with method(*args, **kwargs) as cm:
func()
def testAssertRaises(self):
self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
def testAssertRaisesRegex(self):
# test error not raised
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
lambda: None,
['^TypeError not raised$', '^oops$',
'^TypeError not raised$',
'^TypeError not raised : oops$'])
# test error raised but with wrong message
def raise_wrong_message():
raise TypeError('foo')
self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
def testAssertWarns(self):
self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
def testAssertWarnsRegex(self):
# test error not raised
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
lambda: None,
['^UserWarning not triggered$', '^oops$',
'^UserWarning not triggered$',
'^UserWarning not triggered : oops$'])
# test warning raised but with wrong message
def raise_wrong_message():
warnings.warn('foo')
self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
raise_wrong_message,
['^"regex" does not match "foo"$', '^oops$',
'^"regex" does not match "foo"$',
'^"regex" does not match "foo" : oops$'])
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
moreati/django
|
tests/model_formsets_regress/tests.py
|
173
|
20725
|
from __future__ import unicode_literals
from django import forms
from django.forms.formsets import DELETION_FIELD_NAME, BaseFormSet
from django.forms.models import (
BaseModelFormSet, inlineformset_factory, modelform_factory,
modelformset_factory,
)
from django.forms.utils import ErrorDict, ErrorList
from django.test import TestCase
from django.utils import six
from .models import (
Host, Manager, Network, ProfileNetwork, Restaurant, User, UserProfile,
UserSite,
)
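# Every POST-style data dict below carries the formset management form
# fields (<prefix>-TOTAL_FORMS, <prefix>-INITIAL_FORMS and
# <prefix>-MAX_NUM_FORMS); a formset cannot validate without them.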
class InlineFormsetTests(TestCase):
def test_formset_over_to_field(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #10243"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=User())
# Now create a new User and UserSite instance
data = {
'serial': '1',
'username': 'apollo13',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '0',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-data': '10',
'usersite_set-0-user': 'apollo13'
}
user = User()
form = Form(data)
if form.is_valid():
user = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 10)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '2',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13',
'usersite_set-1-data': '42',
'usersite_set-1-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values().order_by('data')
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
self.assertEqual(usersite[1]['data'], 42)
self.assertEqual(usersite[1]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_over_inherited_model(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #11120"
Form = modelform_factory(Restaurant, fields="__all__")
FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
'name': "Guido's House of Pasta",
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '0',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-name': 'Guido Van Rossum'
}
        restaurant = Restaurant()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Guido Van Rossum')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the Manager instance
data = {
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new Manager instance
data = {
'manager_set-TOTAL_FORMS': '2',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam',
'manager_set-1-name': 'John Cleese'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values().order_by('name')
self.assertEqual(manager[0]['name'], 'John Cleese')
self.assertEqual(manager[1]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_inline_model_with_to_field(self):
"""
#13794 --- An inline model with a to_field of a formset with instance
has working relations.
"""
FormSet = inlineformset_factory(User, UserSite, exclude=('is_superuser',))
user = User.objects.create(username="guido", serial=1337)
UserSite.objects.create(user=user, data=10)
formset = FormSet(instance=user)
# Testing the inline model's relation
self.assertEqual(formset[0].instance.user_id, "guido")
def test_inline_model_with_to_field_to_rel(self):
"""
#13794 --- An inline model with a to_field to a related field of a
formset with instance has working relations.
"""
FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[])
user = User.objects.create(username="guido", serial=1337, pk=1)
self.assertEqual(user.pk, 1)
profile = UserProfile.objects.create(user=user, about="about", pk=2)
self.assertEqual(profile.pk, 2)
ProfileNetwork.objects.create(profile=profile, network=10, identifier=10)
formset = FormSet(instance=profile)
# Testing the inline model's relation
self.assertEqual(formset[0].instance.profile_id, 1)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
Form(instance=None)
FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"""
No fields passed to modelformset_factory() should result in no fields
on returned forms except for the id (#14119).
"""
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertIn('id', form.fields)
self.assertEqual(len(form.fields), 1)
def test_save_as_new_with_new_inlines(self):
"""
Existing and new inlines are saved with save_as_new.
Regression for #14938.
"""
efnet = Network.objects.create(name="EFNet")
host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
HostFormSet = inlineformset_factory(Network, Host, fields="__all__")
# Add a new host, modify previous host, and save-as-new
data = {
'host_set-TOTAL_FORMS': '2',
'host_set-INITIAL_FORMS': '1',
'host_set-MAX_NUM_FORMS': '0',
'host_set-0-id': six.text_type(host1.id),
'host_set-0-hostname': 'tranquility.hub.dal.net',
'host_set-1-hostname': 'matrix.de.eu.dal.net'
}
# To save a formset as new, it needs a new hub instance
dalnet = Network.objects.create(name="DALnet")
formset = HostFormSet(data, instance=dalnet, save_as_new=True)
self.assertTrue(formset.is_valid())
formset.save()
self.assertQuerysetEqual(
dalnet.host_set.order_by("hostname"),
["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"]
)
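        # save_as_new ignores the submitted pks and re-creates every inline
        # row against the new parent instance, which is why both hostnames
        # end up attached to "dalnet" above.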
def test_initial_data(self):
user = User.objects.create(username="bibi", serial=1)
UserSite.objects.create(user=user, data=7)
FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__")
formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}])
self.assertEqual(formset.forms[0].initial['data'], 7)
self.assertEqual(formset.extra_forms[0].initial['data'], 41)
self.assertIn('value="42"', formset.extra_forms[1].as_p())
class FormsetTests(TestCase):
def test_error_class(self):
'''
Test the type of Formset and Form error attributes
'''
Formset = modelformset_factory(User, fields="__all__")
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-id': '',
'form-0-username': 'apollo13',
'form-0-serial': '1',
'form-1-id': '',
'form-1-username': 'apollo13',
'form-1-serial': '2',
}
formset = Formset(data)
# check if the returned error classes are correct
# note: formset.errors returns a list as documented
self.assertIsInstance(formset.errors, list)
self.assertIsInstance(formset.non_form_errors(), ErrorList)
for form in formset.forms:
self.assertIsInstance(form.errors, ErrorDict)
self.assertIsInstance(form.non_field_errors(), ErrorList)
def test_initial_data(self):
User.objects.create(username="bibi", serial=1)
Formset = modelformset_factory(User, fields="__all__", extra=2)
formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}])
self.assertEqual(formset.forms[0].initial['username'], "bibi")
self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11")
self.assertIn('value="apollo12"', formset.extra_forms[1].as_p())
def test_extraneous_query_is_not_run(self):
Formset = modelformset_factory(Network, fields="__all__")
data = {'test-TOTAL_FORMS': '1',
'test-INITIAL_FORMS': '0',
'test-MAX_NUM_FORMS': '',
'test-0-name': 'Random Place', }
with self.assertNumQueries(1):
formset = Formset(data, prefix="test")
formset.save()
class CustomWidget(forms.widgets.TextInput):
pass
class UserSiteForm(forms.ModelForm):
class Meta:
model = UserSite
fields = "__all__"
widgets = {
'id': CustomWidget,
'data': CustomWidget,
}
localized_fields = ('data',)
class Callback(object):
def __init__(self):
self.log = []
def __call__(self, db_field, **kwargs):
self.log.append((db_field, kwargs))
return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
"""
Regression for #13095 and #17683: Using base forms with widgets
defined in Meta should not raise errors and BaseModelForm should respect
the specified pk widget.
"""
def test_inlineformset_factory_default(self):
Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__")
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def test_modelformset_factory_default(self):
Formset = modelformset_factory(UserSite, form=UserSiteForm)
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def assertCallbackCalled(self, callback):
id_field, user_field, data_field = UserSite._meta.fields
expected_log = [
(id_field, {'widget': CustomWidget}),
(user_field, {}),
(data_field, {'widget': CustomWidget, 'localize': True}),
]
self.assertEqual(callback.log, expected_log)
def test_inlineformset_custom_callback(self):
callback = Callback()
inlineformset_factory(User, UserSite, form=UserSiteForm,
formfield_callback=callback, fields="__all__")
self.assertCallbackCalled(callback)
def test_modelformset_custom_callback(self):
callback = Callback()
modelformset_factory(UserSite, form=UserSiteForm,
formfield_callback=callback)
self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
"""
A formset mix-in that lets a form decide if it's to be deleted.
Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.
form.should_delete() is called. The formset delete field is also suppressed.
"""
def add_fields(self, form, index):
super(BaseCustomDeleteFormSet, self).add_fields(form, index)
self.can_delete = True
if DELETION_FIELD_NAME in form.fields:
del form.fields[DELETION_FIELD_NAME]
def _should_delete_form(self, form):
return hasattr(form, 'should_delete') and form.should_delete()
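# The mix-in above re-enables can_delete in add_fields() but strips the
# visible DELETE checkbox, so whether a form is deleted is decided solely
# by form.should_delete() through _should_delete_form().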
class FormfieldShouldDeleteFormTests(TestCase):
"""
Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form
"""
class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
""" Model FormSet with CustomDelete MixIn """
class CustomDeleteUserForm(forms.ModelForm):
""" A model form with a 'should_delete' method """
class Meta:
model = User
fields = "__all__"
def should_delete(self):
""" delete form if odd PK """
return self.instance.pk % 2 != 0
NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True)
DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet)
data = {
'form-TOTAL_FORMS': '4',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '4',
'form-0-username': 'John',
'form-0-serial': '1',
'form-1-username': 'Paul',
'form-1-serial': '2',
'form-2-username': 'George',
'form-2-serial': '3',
'form-3-username': 'Ringo',
'form-3-serial': '5',
}
delete_all_ids = {
'form-0-DELETE': '1',
'form-1-DELETE': '1',
'form-2-DELETE': '1',
'form-3-DELETE': '1',
}
def test_init_database(self):
""" Add test data to database via formset """
formset = self.NormalFormset(self.data)
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 4)
def test_no_delete(self):
""" Verify base formset doesn't modify database """
# reload database
self.test_init_database()
# pass standard data dict & see none updated
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update({
'form-%d-id' % i: user.pk
for i, user in enumerate(User.objects.all())
})
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 4)
def test_all_delete(self):
""" Verify base formset honors DELETE field """
# reload database
self.test_init_database()
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update({
'form-%d-id' % i: user.pk
for i, user in enumerate(User.objects.all())
})
data.update(self.delete_all_ids)
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 0)
def test_custom_delete(self):
""" Verify DeleteFormset ignores DELETE field and uses form method """
# reload database
self.test_init_database()
# Create formset with custom Delete function
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update({
'form-%d-id' % i: user.pk
for i, user in enumerate(User.objects.all())
})
data.update(self.delete_all_ids)
formset = self.DeleteFormset(data, queryset=User.objects.all())
# verify two were deleted
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 2)
# verify no "odd" PKs left
odd_ids = [user.pk for user in User.objects.all() if user.pk % 2]
self.assertEqual(len(odd_ids), 0)
class RedeleteTests(TestCase):
def test_resubmit(self):
u = User.objects.create(username='foo', serial=1)
us = UserSite.objects.create(user=u, data=7)
formset_cls = inlineformset_factory(User, UserSite, fields="__all__")
data = {
'serial': '1',
'username': 'foo',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '1',
'usersite_set-0-id': six.text_type(us.pk),
'usersite_set-0-data': '7',
'usersite_set-0-user': 'foo',
'usersite_set-0-DELETE': '1'
}
formset = formset_cls(data, instance=u)
self.assertTrue(formset.is_valid())
formset.save()
self.assertEqual(UserSite.objects.count(), 0)
formset = formset_cls(data, instance=u)
# Even if the "us" object isn't in the DB any more, the form
# validates.
self.assertTrue(formset.is_valid())
formset.save()
self.assertEqual(UserSite.objects.count(), 0)
def test_delete_already_deleted(self):
u = User.objects.create(username='foo', serial=1)
us = UserSite.objects.create(user=u, data=7)
formset_cls = inlineformset_factory(User, UserSite, fields="__all__")
data = {
'serial': '1',
'username': 'foo',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '1',
'usersite_set-0-id': six.text_type(us.pk),
'usersite_set-0-data': '7',
'usersite_set-0-user': 'foo',
'usersite_set-0-DELETE': '1'
}
formset = formset_cls(data, instance=u)
us.delete()
self.assertTrue(formset.is_valid())
formset.save()
self.assertEqual(UserSite.objects.count(), 0)
|
bsd-3-clause
|
ahmetabdi/SickRage
|
lib/sqlalchemy/event/api.py
|
75
|
3844
|
# event/api.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions for the event system.
"""
from __future__ import absolute_import
from .. import util, exc
from .base import _registrars
from .registry import _EventKey
CANCEL = util.symbol('CANCEL')
NO_RETVAL = util.symbol('NO_RETVAL')
def _event_key(target, identifier, fn):
for evt_cls in _registrars[identifier]:
tgt = evt_cls._accept_with(target)
if tgt is not None:
return _EventKey(target, identifier, fn, tgt)
else:
raise exc.InvalidRequestError("No such event '%s' for target '%s'" %
(identifier, target))
def listen(target, identifier, fn, *args, **kw):
"""Register a listener function for the given target.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
def on_config():
do_config()
event.listen(Mapper, "before_configure", on_config, once=True)
.. versionadded:: 0.9.3 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
"""
_event_key(target, identifier, fn).listen(*args, **kw)
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
e.g.::
from sqlalchemy import event
from sqlalchemy.schema import UniqueConstraint
@event.listens_for(UniqueConstraint, "after_parent_attach")
def unique_constraint_name(const, table):
const.name = "uq_%s_%s" % (
table.name,
list(const.columns)[0].name
)
A given function can also be invoked for only the first invocation
of the event using the ``once`` argument::
@event.listens_for(Mapper, "before_configure", once=True)
def on_config():
do_config()
.. versionadded:: 0.9.3 Added ``once=True`` to :func:`.event.listen`
and :func:`.event.listens_for`.
"""
def decorate(fn):
listen(target, identifier, fn, *args, **kw)
return fn
return decorate
def remove(target, identifier, fn):
"""Remove an event listener.
The arguments here should match exactly those which were sent to
:func:`.listen`; all the event registration which proceeded as a result
of this call will be reverted by calling :func:`.remove` with the same
arguments.
e.g.::
# if a function was registered like this...
@event.listens_for(SomeMappedClass, "before_insert", propagate=True)
def my_listener_function(*arg):
pass
# ... it's removed like this
event.remove(SomeMappedClass, "before_insert", my_listener_function)
Above, the listener function associated with ``SomeMappedClass`` was also
propagated to subclasses of ``SomeMappedClass``; the :func:`.remove` function
will revert all of these operations.
.. versionadded:: 0.9.0
"""
_event_key(target, identifier, fn).remove()
def contains(target, identifier, fn):
"""Return True if the given target/ident/fn is set up to listen.
.. versionadded:: 0.9.0
"""
return _event_key(target, identifier, fn).contains()
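# A minimal end-to-end sketch tying the public functions together
# (``SomeTarget``/"some_event" are hypothetical placeholders, not a real
# SQLAlchemy target/event pair):
#
#     def handler(*arg):
#         pass
#
#     listen(SomeTarget, "some_event", handler)
#     assert contains(SomeTarget, "some_event", handler)
#     remove(SomeTarget, "some_event", handler)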
|
gpl-3.0
|
sophacles/invoke
|
tests/parser/context.py
|
2
|
11893
|
import copy
from spec import Spec, eq_, skip, ok_, raises
from invoke.parser import Argument, Context
from invoke.tasks import task
from invoke.collection import Collection
class Context_(Spec):
"ParserContext" # meh
def may_have_a_name(self):
c = Context(name='taskname')
eq_(c.name, 'taskname')
def may_have_aliases(self):
c = Context(name='realname', aliases=('othername', 'yup'))
assert 'othername' in c.aliases
def may_give_arg_list_at_init_time(self):
a1 = Argument('foo')
a2 = Argument('bar')
c = Context(name='name', args=(a1, a2))
assert c.args['foo'] is a1
# TODO: reconcile this sort of test organization with the .flags oriented
# tests within 'add_arg'. Some of this behavior is technically driven by
# add_arg.
class args:
def setup(self):
self.c = Context(args=(
Argument('foo'),
Argument(names=('bar', 'biz')),
Argument('baz', attr_name='wat'),
))
def exposed_as_dict(self):
assert 'foo' in self.c.args.keys()
def exposed_as_Lexicon(self):
eq_(self.c.args.bar, self.c.args['bar'])
def args_dict_includes_all_arg_names(self):
for x in ('foo', 'bar', 'biz'):
assert x in self.c.args
def argument_attr_names_appear_in_args_but_not_flags(self):
# Both appear as "Python-facing" args
for x in ('baz', 'wat'):
assert x in self.c.args
# But attr_name is for Python access only and isn't shown to the
# parser.
assert 'wat' not in self.c.flags
class add_arg:
def setup(self):
self.c = Context()
def can_take_Argument_instance(self):
a = Argument(names=('foo',))
self.c.add_arg(a)
assert self.c.args['foo'] is a
def can_take_name_arg(self):
self.c.add_arg('foo')
assert 'foo' in self.c.args
def can_take_kwargs_for_single_Argument(self):
self.c.add_arg(names=('foo', 'bar'))
assert 'foo' in self.c.args and 'bar' in self.c.args
@raises(ValueError)
def raises_ValueError_on_duplicate(self):
self.c.add_arg(names=('foo', 'bar'))
self.c.add_arg(name='bar')
def adds_flaglike_name_to_dot_flags(self):
"adds flaglike name to .flags"
self.c.add_arg('foo')
assert '--foo' in self.c.flags
def adds_all_names_to_dot_flags(self):
"adds all names to .flags"
self.c.add_arg(names=('foo', 'bar'))
assert '--foo' in self.c.flags
assert '--bar' in self.c.flags
def adds_true_bools_to_inverse_flags(self):
self.c.add_arg(name='myflag', default=True, kind=bool)
assert '--myflag' in self.c.flags
assert '--no-myflag' in self.c.inverse_flags
eq_(self.c.inverse_flags['--no-myflag'], '--myflag')
def inverse_flags_works_right_with_task_driven_underscored_names(self):
            # Use a Task here instead of creating a raw argument; we're partly
            # testing Task.get_arguments()'s transform of underscored names
            # here. Yes, that makes this an integration test, but it's nice to
            # test it at this level & not just in cli tests.
@task
def mytask(underscored_option=True):
pass
self.c.add_arg(mytask.get_arguments()[0])
eq_(
self.c.inverse_flags['--no-underscored-option'],
'--underscored-option'
)
def turns_single_character_names_into_short_flags(self):
self.c.add_arg('f')
assert '-f' in self.c.flags
assert '--f' not in self.c.flags
def adds_positional_args_to_positional_args(self):
self.c.add_arg(name='pos', positional=True)
eq_(self.c.positional_args[0].name, 'pos')
def positional_args_empty_when_none_given(self):
eq_(len(self.c.positional_args), 0)
def positional_args_filled_in_order(self):
self.c.add_arg(name='pos1', positional=True)
eq_(self.c.positional_args[0].name, 'pos1')
self.c.add_arg(name='abc', positional=True)
eq_(self.c.positional_args[1].name, 'abc')
def positional_arg_modifications_affect_args_copy(self):
self.c.add_arg(name='hrm', positional=True)
eq_(self.c.args['hrm'].value, self.c.positional_args[0].value)
self.c.positional_args[0].value = 17
eq_(self.c.args['hrm'].value, self.c.positional_args[0].value)
class deepcopy:
"__deepcopy__"
def setup(self):
self.arg = Argument('--boolean')
self.orig = Context(
name='mytask',
args=(self.arg,),
aliases=('othername',)
)
self.new = copy.deepcopy(self.orig)
def returns_correct_copy(self):
assert self.new is not self.orig
eq_(self.new.name, 'mytask')
assert 'othername' in self.new.aliases
def includes_arguments(self):
eq_(len(self.new.args), 1)
assert self.new.args['--boolean'] is not self.arg
def modifications_to_copied_arguments_do_not_touch_originals(self):
new_arg = self.new.args['--boolean']
new_arg.value = True
assert new_arg.value
assert not self.arg.value
class help_for:
def setup(self):
# Normal, non-task/collection related Context
self.vanilla = Context(args=(
Argument('foo'),
Argument('bar', help="bar the baz")
))
# Task/Collection generated Context
# (will expose flags n such)
@task(help={'otherarg': 'other help'}, optional=['optval'])
def mytask(myarg, otherarg, optval):
pass
col = Collection(mytask)
self.tasked = col.to_contexts()[0]
@raises(ValueError)
def raises_ValueError_for_non_flag_values(self):
self.vanilla.help_for('foo')
def vanilla_no_helpstr(self):
eq_(
self.vanilla.help_for('--foo'),
("--foo=STRING", "")
)
def vanilla_with_helpstr(self):
eq_(
self.vanilla.help_for('--bar'),
("--bar=STRING", "bar the baz")
)
def task_driven_with_helpstr(self):
eq_(
self.tasked.help_for('--otherarg'),
("-o STRING, --otherarg=STRING", "other help")
)
# Yes, the next 3 tests are identical in form, but technically they
# test different behaviors. HERPIN' AN' DERPIN'
def task_driven_no_helpstr(self):
eq_(
self.tasked.help_for('--myarg'),
("-m STRING, --myarg=STRING", "")
)
def short_form_before_long_form(self):
eq_(
self.tasked.help_for('--myarg'),
("-m STRING, --myarg=STRING", "")
)
def equals_sign_for_long_form_only(self):
eq_(
self.tasked.help_for('--myarg'),
("-m STRING, --myarg=STRING", "")
)
def kind_to_placeholder_map(self):
# str=STRING, int=INT, etc etc
skip()
def shortflag_inputs_work_too(self):
eq_(self.tasked.help_for('-m'), self.tasked.help_for('--myarg'))
def optional_values_use_brackets(self):
eq_(
self.tasked.help_for('--optval'),
("-p [STRING], --optval[=STRING]", "")
)
def underscored_args(self):
c = Context(args=(Argument('i_have_underscores', help='yup'),))
eq_(c.help_for('--i-have-underscores'), ('--i-have-underscores=STRING', 'yup'))
def true_default_args(self):
c = Context(args=(Argument('truthy', kind=bool, default=True),))
eq_(c.help_for('--truthy'), ('--[no-]truthy', ''))
class help_tuples:
def returns_list_of_help_tuples(self):
# Walks own list of flags/args, ensures resulting map to help_for()
# TODO: consider redoing help_for to be more flexible on input --
            # arg value or flag, or even Argument objects?
@task(help={'otherarg': 'other help'})
def mytask(myarg, otherarg):
pass
c = Collection(mytask).to_contexts()[0]
eq_(
c.help_tuples(),
[c.help_for('--myarg'), c.help_for('--otherarg')]
)
def _assert_order(self, name_tuples, expected_flag_order):
ctx = Context(args=[Argument(names=x) for x in name_tuples])
return eq_(
ctx.help_tuples(),
[ctx.help_for(x) for x in expected_flag_order]
)
def sorts_alphabetically_by_shortflag_first(self):
# Where shortflags exist, they take precedence
self._assert_order(
[('zarg', 'a'), ('arg', 'z')],
['--zarg', '--arg']
)
def case_ignored_during_sorting(self):
self._assert_order(
[('a',), ('B',)],
# In raw cmp() uppercase would come before lowercase,
# and we'd get ['-B', '-a']
['-a', '-B']
)
def lowercase_wins_when_values_identical_otherwise(self):
self._assert_order(
[('V',), ('v',)],
['-v', '-V']
)
def sorts_alphabetically_by_longflag_when_no_shortflag(self):
# Where no shortflag, sorts by longflag
self._assert_order(
[('otherarg',), ('longarg',)],
['--longarg', '--otherarg']
)
def sorts_heterogenous_help_output_with_longflag_only_options_first(self):
# When both of the above mix, long-flag-only options come first.
# E.g.:
# --alpha
# --beta
# -a, --aaaagh
# -b, --bah
# -c
self._assert_order(
[('c',), ('a', 'aaagh'), ('b', 'bah'), ('beta',), ('alpha',)],
['--alpha', '--beta', '-a', '-b', '-c']
)
def mixed_corelike_options(self):
self._assert_order(
[('V', 'version'), ('c', 'collection'), ('h', 'help'),
('l', 'list'), ('r', 'root')],
['-c', '-h', '-l', '-r', '-V']
)
class needs_positional_arg:
def represents_whether_all_positional_args_have_values(self):
c = Context(name='foo', args=(
Argument('arg1', positional=True),
Argument('arg2', positional=False),
Argument('arg3', positional=True),
))
eq_(c.needs_positional_arg, True)
c.positional_args[0].value = 'wat'
eq_(c.needs_positional_arg, True)
c.positional_args[1].value = 'hrm'
eq_(c.needs_positional_arg, False)
class str:
"__str__"
def with_no_args_output_is_simple(self):
eq_(str(Context('foo')), "<parser/Context 'foo'>")
def args_show_as_repr(self):
eq_(
str(Context('bar', args=[Argument('arg1')])),
"<parser/Context 'bar': {'arg1': <Argument: arg1>}>"
)
def repr_is_str(self):
"__repr__ mirrors __str__"
c = Context('foo')
eq_(str(c), repr(c))
|
bsd-2-clause
|
Insanityandme/dotfiles
|
vim/bundle/ultisnips/test/test_Completion.py
|
29
|
1299
|
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Tab Completion of Words {{{#
class Completion_SimpleExample_ECR(_VimTest):
snippets = ('test', '$1 ${1:blah}')
keys = 'superkallifragilistik\ntest' + EX + 'sup' + COMPL_KW + \
COMPL_ACCEPT + ' some more'
wanted = 'superkallifragilistik\nsuperkallifragilistik some more ' \
'superkallifragilistik some more'
# We need >2 different words with identical starts to create the
# popup-menu:
COMPLETION_OPTIONS = 'completion1\ncompletion2\n'
class Completion_ForwardsJumpWithoutCOMPL_ACCEPT(_VimTest):
# completions should not be truncated when JF is activated without having
# pressed COMPL_ACCEPT (Bug #598903)
snippets = ('test', '$1 $2')
keys = COMPLETION_OPTIONS + 'test' + EX + 'com' + COMPL_KW + JF + 'foo'
wanted = COMPLETION_OPTIONS + 'completion1 foo'
class Completion_BackwardsJumpWithoutCOMPL_ACCEPT(_VimTest):
# completions should not be truncated when JB is activated without having
# pressed COMPL_ACCEPT (Bug #598903)
snippets = ('test', '$1 $2')
keys = COMPLETION_OPTIONS + 'test' + EX + 'foo' + JF + 'com' + COMPL_KW + \
JB + 'foo'
wanted = COMPLETION_OPTIONS + 'foo completion1'
# End: Tab Completion of Words #}}}
|
unlicense
|
rgerkin/neuroConstruct
|
lib/jython/Lib/UserList.py
|
327
|
3644
|
"""A more or less complete user-defined wrapper around list objects."""
import collections
class UserList(collections.MutableSequence):
def __init__(self, initlist=None):
self.data = []
if initlist is not None:
# XXX should this accept an arbitrary sequence?
if type(initlist) == type(self.data):
self.data[:] = initlist
elif isinstance(initlist, UserList):
self.data[:] = initlist.data[:]
else:
self.data = list(initlist)
def __repr__(self): return repr(self.data)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
if isinstance(other, UserList): return other.data
else: return other
def __cmp__(self, other):
return cmp(self.data, self.__cast(other))
__hash__ = None # Mutable sequence, so not hashable
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
def __getitem__(self, i): return self.data[i]
def __setitem__(self, i, item): self.data[i] = item
def __delitem__(self, i): del self.data[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self.__class__(self.data[i:j])
def __setslice__(self, i, j, other):
i = max(i, 0); j = max(j, 0)
if isinstance(other, UserList):
self.data[i:j] = other.data
elif isinstance(other, type(self.data)):
self.data[i:j] = other
else:
self.data[i:j] = list(other)
def __delslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
del self.data[i:j]
def __add__(self, other):
if isinstance(other, UserList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, UserList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
else:
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserList):
self.data += other.data
elif isinstance(other, type(self.data)):
self.data += other
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
def append(self, item): self.data.append(item)
def insert(self, i, item): self.data.insert(i, item)
def pop(self, i=-1): return self.data.pop(i)
def remove(self, item): self.data.remove(item)
def count(self, item): return self.data.count(item)
def index(self, item, *args): return self.data.index(item, *args)
def reverse(self): self.data.reverse()
def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
def extend(self, other):
if isinstance(other, UserList):
self.data.extend(other.data)
else:
self.data.extend(other)
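# A brief doctest-style sketch of the wrapper in action (assuming Python 2,
# which this Jython copy targets):
#
#     >>> ul = UserList([1, 2, 3])
#     >>> ul.append(4)
#     >>> ul[1:3]      # old-style slicing goes through __getslice__
#     [2, 3]
#     >>> ul + [5]     # concatenation wraps the result in the same class
#     [1, 2, 3, 4, 5]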
|
gpl-2.0
|
dou800/php-buildpack-legacy
|
builds/runtimes/python-2.7.6/lib/python2.7/email/test/test_email_codecs_renamed.py
|
298
|
2842
|
# Copyright (C) 2002-2006 Python Software Foundation
# Contact: [email protected]
# email package unit tests for (optional) Asian codecs
import unittest
from test.test_support import run_unittest
from email.test.test_email import TestEmailBase
from email.charset import Charset
from email.header import Header, decode_header
from email.message import Message
# We're compatible with Python 2.3, but it doesn't have the built-in Asian
# codecs, so we have to skip all these tests.
try:
unicode('foo', 'euc-jp')
except LookupError:
raise unittest.SkipTest
class TestEmailAsianCodecs(TestEmailBase):
def test_japanese_codecs(self):
eq = self.ndiffAssertEqual
j = Charset("euc-jp")
g = Charset("iso-8859-1")
h = Header("Hello World!")
jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
ghello = 'Gr\xfc\xdf Gott!'
h.append(jhello, j)
h.append(ghello, g)
# BAW: This used to -- and maybe should -- fold the two iso-8859-1
# chunks into a single encoded word. However it doesn't violate the
# standard to have them as two encoded chunks and maybe it's
# reasonable <wink> for each .append() call to result in a separate
# encoded word.
eq(h.encode(), """\
Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?=
=?iso-8859-1?q?Gr=FC=DF?= =?iso-8859-1?q?_Gott!?=""")
eq(decode_header(h.encode()),
[('Hello World!', None),
('\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'),
('Gr\xfc\xdf Gott!', 'iso-8859-1')])
long = 'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9'
h = Header(long, j, header_name="Subject")
# test a very long header
enc = h.encode()
# TK: splitting point may differ by codec design and/or Header encoding
eq(enc , """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?=
=?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""")
# TK: full decode comparison
eq(h.__unicode__().encode('euc-jp'), long)
def test_payload_encoding(self):
jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
jcode = 'euc-jp'
msg = Message()
msg.set_payload(jhello, jcode)
ustr = unicode(msg.get_payload(), msg.get_content_charset())
self.assertEqual(jhello, ustr.encode(jcode))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestEmailAsianCodecs))
return suite
def test_main():
run_unittest(TestEmailAsianCodecs)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
mit
|
Mappy/luigi
|
luigi/parameter.py
|
1
|
16451
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import configuration
import datetime
import warnings
from ConfigParser import NoSectionError, NoOptionError
_no_value = object()
class ParameterException(Exception):
"""Base exception."""
pass
class MissingParameterException(ParameterException):
"""Exception signifying that there was a missing Parameter."""
pass
class UnknownParameterException(ParameterException):
"""Exception signifying that an unknown Parameter was supplied."""
pass
class DuplicateParameterException(ParameterException):
"""Exception signifying that a Parameter was specified multiple times."""
pass
class UnknownConfigException(ParameterException):
"""Exception signifying that the ``default_from_config`` for the Parameter could not be found."""
pass
class Parameter(object):
"""An untyped Parameter
Parameters are objects set on the Task class level to make it possible to parameterize tasks.
For instance:
class MyTask(luigi.Task):
foo = luigi.Parameter()
    This makes it possible to instantiate multiple tasks, e.g. ``MyTask(foo='bar')`` and
    ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.
    There are subclasses of ``Parameter`` that define what type the parameter has. This is not
    enforced within Python, but is used for command line interaction.
The ``config_path`` argument lets you specify a place where the parameter is read from config
in case no value is provided.
Providing ``is_global=True`` changes the behavior of the parameter so that the value is shared
across all instances of the task. Global parameters can be provided in several ways. In
    descending order of precedence:
* A value provided on the command line (eg. ``--my-global-value xyz``)
* A value provided via config (using the ``config_path`` argument)
* A default value set using the ``default`` flag.
"""
counter = 0
"""non-atomically increasing counter used for ordering parameters."""
def __init__(self, default=_no_value, is_list=False, is_boolean=False, is_global=False, significant=True, description=None,
config_path=None, default_from_config=None):
"""
:param default: the default value for this parameter. This should match the type of the
Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
``IntParameter``. By default, no default is stored and
the value must be specified at runtime.
:param bool is_list: specify ``True`` if the parameter should allow a list of values rather
than a single value. Default: ``False``. A list has an implicit default
value of ``[]``.
:param bool is_boolean: specify ``True`` if the parameter is a boolean value. Default:
                            ``False``. Booleans have an implicit default value of ``False``.
:param bool is_global: specify ``True`` if the parameter is global (i.e. used by multiple
Tasks). Default: ``False``.
:param bool significant: specify ``False`` if the parameter should not be treated as part of
the unique identifier for a Task. An insignificant Parameter might
also be used to specify a password or other sensitive information
that should not be made public via the scheduler. Default:
``True``.
:param str description: A human-readable string describing the purpose of this Parameter.
For command-line invocations, this will be used as the `help` string
shown to users. Default: ``None``.
:param dict config_path: a dictionary with entries ``section`` and ``name``
specifying a config file entry from which to read the
default value for this parameter.
Default: ``None``.
"""
# The default default is no default
self.__default = default
self.__global = _no_value
self.is_list = is_list
self.is_boolean = is_boolean and not is_list # Only BooleanParameter should ever use this. TODO(erikbern): should we raise some kind of exception?
self.is_global = is_global # It just means that the default value is exposed and you can override it
self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks
if default_from_config is not None:
warnings.warn("Use config_path parameter, not default_from_config", DeprecationWarning)
config_path = default_from_config
if is_global and default == _no_value and config_path is None:
raise ParameterException('Global parameters need default values')
self.description = description
        if config_path is not None and ('section' not in config_path or 'name' not in config_path):
            raise ParameterException('config_path must be a dict containing entries for section and name')
self.__config = config_path
self.counter = Parameter.counter # We need to keep track of this to get the order right (see Task class)
Parameter.counter += 1
def _get_value_from_config(self):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
if not self.__config:
return _no_value
conf = configuration.get_config()
(section, name) = (self.__config['section'], self.__config['name'])
try:
value = conf.get(section, name)
        except (NoSectionError, NoOptionError):
return _no_value
if self.is_list:
return tuple(self.parse(p.strip()) for p in value.strip().split('\n'))
else:
return self.parse(value)
@property
def has_value(self):
"""``True`` if a default was specified or if config_path references a valid entry in the conf.
Note that "value" refers to the Parameter object itself - it can be either
1. The default value for this parameter
2. A value read from the config
3. A global value
Any Task instance can have its own value set that overrides this.
"""
values = [self.__global, self._get_value_from_config(), self.__default]
for value in values:
if value != _no_value:
return True
else:
return False
@property
def has_default(self):
"""Don't use this function - see has_value instead"""
warnings.warn('Use has_value rather than has_default. The meaning of "default" has changed', DeprecationWarning)
return self.has_value
@property
def value(self):
"""The value for this Parameter.
This refers to any value defined by a default, a config option, or
a global value.
:raises MissingParameterException: if a value is not set.
:return: the parsed value.
"""
values = [self.__global, self._get_value_from_config(), self.__default]
for value in values:
if value != _no_value:
return value
else:
raise MissingParameterException("No default specified")
@property
def default(self):
warnings.warn('Use value rather than default. The meaning of "default" has changed', DeprecationWarning)
return self.value
def set_global(self, value):
"""Set the global value of this Parameter.
:param value: the new global value.
"""
assert self.is_global
self.__global = value
def reset_global(self):
self.__global = _no_value
def set_default(self, value):
"""Set the default value of this Parameter.
:param value: the new default value.
"""
warnings.warn('Use set_global rather than set_default. The meaning of "default" has changed', DeprecationWarning)
self.__default = value
def parse(self, x):
"""Parse an individual value from the input.
        The default implementation is the identity (it returns ``x``), but subclasses should override
this method for specialized parsing. This method is called by :py:meth:`parse_from_input`
if ``x`` exists. If this Parameter was specified with ``is_list=True``, then ``parse`` is
called once for each item in the list.
:param str x: the value to parse.
:return: the parsed value.
"""
return x # default impl
def serialize(self, x): # opposite of parse
"""Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
return str(x)
def parse_from_input(self, param_name, x):
"""
Parses the parameter value from input ``x``, handling defaults and is_list.
:param param_name: the name of the parameter. This is used for the message in
``MissingParameterException``.
:param x: the input value to parse.
:raises MissingParameterException: if x is false-y and no default is specified.
"""
if not x:
if self.has_value:
return self.value
elif self.is_boolean:
return False
elif self.is_list:
return []
else:
raise MissingParameterException("No value for '%s' (%s) submitted and no default value has been assigned." % \
(param_name, "--" + param_name.replace('_', '-')))
elif self.is_list:
return tuple(self.parse(p) for p in x)
else:
return self.parse(x)
def serialize_to_input(self, x):
if self.is_list:
return tuple(self.serialize(p) for p in x)
else:
return self.serialize(x)
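# A small sketch of how parsing and defaults interact (illustrative;
# IntParameter is defined further below):
#
#     p = Parameter(default='spam')
#     p.parse_from_input('my_param', None)        # -> 'spam' (falls back to the default)
#     p.parse_from_input('my_param', 'eggs')      # -> 'eggs' (parse() is the identity here)
#
#     lp = IntParameter(is_list=True)
#     lp.parse_from_input('my_list', ['1', '2'])  # -> (1, 2)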
class DateHourParameter(Parameter):
"""Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.
    A DateHourParameter is an `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
19:00.
"""
def parse(self, s):
"""
Parses a string to a :py:class:`~datetime.datetime` using the format string ``%Y-%m-%dT%H``.
"""
        # TODO(erikbern): we should probably use an internal class for arbitrary
        # time intervals (similar to date_interval). Or what do you think?
        return datetime.datetime.strptime(s, "%Y-%m-%dT%H")  # ISO 8601 uses 'T' as the date/time separator
def serialize(self, dt):
"""
        Converts the datetime to a string using the format string ``%Y-%m-%dT%H``.
"""
if dt is None: return str(dt)
return dt.strftime('%Y-%m-%dT%H')
class DateParameter(Parameter):
"""Parameter whose value is a :py:class:`~datetime.date`.
    A DateParameter is a date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies
July 10, 2013.
"""
def parse(self, s):
"""Parses a date string formatted as ``YYYY-MM-DD``."""
return datetime.date(*map(int, s.split('-')))
class IntParameter(Parameter):
"""Parameter whose value is an ``int``."""
def parse(self, s):
"""Parses an ``int`` from the string using ``int()``."""
return int(s)
class FloatParameter(Parameter):
"""Parameter whose value is a ``float``."""
def parse(self, s):
"""Parses a ``float`` from the string using ``float()``."""
return float(s)
class BooleanParameter(Parameter):
"""A Parameter whose value is a ``bool``."""
# TODO(erikbern): why do we call this "boolean" instead of "bool"?
# The integer parameter is called "int" so calling this "bool" would be
# more consistent, especially given the Python type names.
def __init__(self, *args, **kwargs):
"""This constructor passes along args and kwargs to ctor for :py:class:`Parameter` but
specifies ``is_boolean=True``.
"""
super(BooleanParameter, self).__init__(*args, is_boolean=True, **kwargs)
def parse(self, s):
"""Parses a ``boolean`` from the string, matching 'true' or 'false' ignoring case."""
return {'true': True, 'false': False}[str(s).lower()]
class DateIntervalParameter(Parameter):
"""A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.
Date Intervals are specified using the ISO 8601 `Time Interval
<http://en.wikipedia.org/wiki/ISO_8601#Time_intervals>`_ notation.
"""
# Class that maps to/from dates using ISO 8601 standard
# Also gives some helpful interval algebra
def parse(self, s):
"""Parses a `:py:class:`~luigi.date_interval.DateInterval` from the input.
see :py:mod:`luigi.date_interval`
for details on the parsing of DateIntervals.
"""
# TODO: can we use xml.utils.iso8601 or something similar?
import date_interval as d
for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]:
i = cls.parse(s)
if i:
return i
else:
raise ValueError('Invalid date interval - could not be parsed')
class TimeDeltaParameter(Parameter):
"""Class that maps to timedelta using strings in any of the following forms:
    - ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]]|s[econd[s]]}`` (e.g. "1 week 2 days" or "1 h")
Note: multiple arguments must be supplied in longest to shortest unit order
- ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)
- ISO 8601 duration ``PnW``
See https://en.wikipedia.org/wiki/ISO_8601#Durations
"""
def _apply_regex(self, regex, input):
from datetime import timedelta
import re
re_match = re.match(regex, input)
if re_match:
kwargs = {}
has_val = False
for k,v in re_match.groupdict(default="0").items():
val = int(v)
has_val = has_val or val != 0
kwargs[k] = val
if has_val:
return timedelta(**kwargs)
def _parseIso8601(self, input):
def field(key):
return "(?P<%s>\d+)%s" % (key, key[0].upper())
def optional_field(key):
return "(%s)?" % field(key)
# A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
        return self._apply_regex(regex, input)
def _parseSimple(self, input):
keys = ["weeks", "days", "hours", "minutes", "seconds"]
# Give the digits a regex group name from the keys, then look for text with the first letter of the key,
# optionally followed by the rest of the word, with final char (the "s") optional
regex = "".join(["((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
return self._apply_regex(regex, input)
def parse(self, input):
"""Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input)
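# A few illustrative inputs accepted by TimeDeltaParameter.parse, derived from
# the regexes above (a sketch, not an exhaustive list):
#
#     TimeDeltaParameter().parse('1 w 2 d')  # -> timedelta(weeks=1, days=2)
#     TimeDeltaParameter().parse('P1DT2H')   # -> timedelta(days=1, hours=2)
#     TimeDeltaParameter().parse('P2W')      # -> timedelta(weeks=2)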
|
apache-2.0
|
SoftwareKing/zstack-utility
|
zstacklib/zstacklib/test/test_thread.py
|
3
|
1078
|
'''
@author: frank
'''
import unittest
import time
import threading
from ..utils.thread import ThreadFacade
from ..utils.thread import AsyncThread
class TestThreadFacade(unittest.TestCase):
def _do(self, name, value=None):
self.ok = name
        self.value = value
def test_run_in_thread(self):
t = ThreadFacade.run_in_thread(self._do, ["ok"], {"value":"world"})
t.join()
self.assertEqual("ok", self.ok)
self.assertEqual("world", self.value)
@AsyncThread
def _do_async(self, ok, value=None):
self.async_thread_name = threading.current_thread().getName()
self.async_ok = ok
self.async_value = value
def test_async_thread(self):
t = self._do_async("ok", value="world")
t.join()
self.assertEqual('_do_async', self.async_thread_name)
self.assertEqual("ok", self.async_ok)
self.assertEqual("world", self.async_value)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
apache-2.0
|
ONSdigital/eq-survey-runner
|
app/views/static.py
|
1
|
2777
|
from flask import Blueprint, current_app, request, session as cookie_session
from flask_themes2 import render_theme_template
from structlog import get_logger
from app.globals import get_session_store
from app.utilities.schema import load_schema_from_session_data
from app.utilities.cookies import analytics_allowed
logger = get_logger()
contact_blueprint = Blueprint(name='contact', import_name=__name__)
@contact_blueprint.route('/contact-us', methods=['GET'])
def contact():
session = None
survey_id = None
session_store = get_session_store()
if session_store:
session = session_store.session_data
schema = load_schema_from_session_data(session)
survey_id = schema.json['survey_id']
cookie_message = request.cookies.get('ons_cookie_message_displayed')
allow_analytics = analytics_allowed(request)
contact_template = render_theme_template(theme=cookie_session.get('theme', 'default'),
template_name='static/contact-us.html',
session=session,
survey_id=survey_id,
analytics_gtm_id=current_app.config['EQ_GTM_ID'],
analytics_gtm_env_id=current_app.config['EQ_GTM_ENV_ID'],
cookie_message=cookie_message,
allow_analytics=allow_analytics)
return contact_template
@contact_blueprint.route('/cookies-privacy', methods=['GET'])
def legal():
cookie_template = render_theme_template(theme=cookie_session.get('theme', 'default'),
template_name='static/cookies-privacy.html',
analytics_gtm_id=current_app.config['EQ_GTM_ID'],
analytics_gtm_env_id=current_app.config['EQ_GTM_ENV_ID'])
return cookie_template
@contact_blueprint.route('/cookies-settings', methods=['GET'])
def settings():
cookie_message = request.cookies.get('ons_cookie_message_displayed')
allow_analytics = analytics_allowed(request)
cookie_template = render_theme_template(theme=cookie_session.get('theme', 'default'),
analytics_gtm_id=current_app.config['EQ_GTM_ID'],
analytics_gtm_env_id=current_app.config['EQ_GTM_ENV_ID'],
cookie_message=cookie_message,
allow_analytics=allow_analytics,
template_name='static/cookies-settings.html')
return cookie_template
|
mit
|
alfredgamulo/cloud-custodian
|
tests/test_notify.py
|
2
|
9902
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest, functional
import base64
import os
import json
import time
import tempfile
import zlib
from c7n.exceptions import PolicyValidationError
class NotifyTest(BaseTest):
@functional
def test_notify_address_from(self):
session_factory = self.replay_flight_data("test_notify_address_from")
client = session_factory().client("sqs")
queue_url = client.create_queue(QueueName="c7n-notify-test")["QueueUrl"]
def cleanup():
client.delete_queue(QueueUrl=queue_url)
if self.recording:
time.sleep(60)
self.addCleanup(cleanup)
temp_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
json.dump({"emails": ["[email protected]"]}, temp_file)
temp_file.flush()
self.addCleanup(os.unlink, temp_file.name)
self.addCleanup(temp_file.close)
url = "file:///%s" % temp_file.name.replace('\\', '/')
policy = self.load_policy(
{
"name": "notify-address",
"resource": "sqs",
"filters": [{"QueueUrl": queue_url}],
"actions": [
{
"type": "notify",
"to": ["[email protected]"],
"to_from": {
"url": url,
"format": "json",
"expr": "emails",
},
"cc_from": {
"url": url,
"format": "json",
"expr": "emails",
},
"transport": {"type": "sqs", "queue": queue_url},
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(policy.data.get("actions")[0].get("to"), ["[email protected]"])
self.assertEqual(len(resources), 1)
messages = client.receive_message(
QueueUrl=queue_url, AttributeNames=["All"]
).get(
"Messages", []
)
self.assertEqual(len(messages), 1)
body = json.loads(zlib.decompress(base64.b64decode(messages[0]["Body"])))
self.assertEqual(
set(body.keys()),
{
"account_id",
"action",
"event",
"policy",
"region",
"account",
"resources",
},
)
def test_resource_prep(self):
session_factory = self.record_flight_data("test_notify_resource_prep")
policy = self.load_policy(
{"name": "notify-sns",
"resource": "ec2",
"actions": [
{"type": "notify", "to": ["[email protected]"],
"transport": {"type": "sns", "topic": "zebra"}}]},
session_factory=session_factory)
self.assertEqual(
policy.resource_manager.actions[0].prepare_resources(
[{'c7n:user-data': 'xyz', 'Id': 'i-123'}]),
[{'Id': 'i-123'}])
policy = self.load_policy(
{"name": "notify-sns",
"resource": "launch-config",
"actions": [
{"type": "notify", "to": ["[email protected]"],
"transport": {"type": "sns", "topic": "zebra"}}]},
session_factory=session_factory)
self.assertEqual(
policy.resource_manager.actions[0].prepare_resources(
[{'UserData': 'xyz', 'Id': 'l-123'}]),
[{'Id': 'l-123'}])
policy = self.load_policy(
{"name": "notify-sns",
"resource": "asg",
"actions": [
{"type": "notify", "to": ["[email protected]"],
"transport": {"type": "sns", "topic": "zebra"}}]},
session_factory=session_factory)
self.assertEqual(
policy.resource_manager.actions[0].prepare_resources(
[{'c7n:user-data': 'xyz', 'Id': 'a-123'}]),
[{'Id': 'a-123'}])
def test_sns_notify(self):
session_factory = self.replay_flight_data("test_sns_notify_action")
client = session_factory().client("sns")
topic = client.create_topic(Name="c7n-notify-test")["TopicArn"]
self.addCleanup(client.delete_topic, TopicArn=topic)
policy = self.load_policy(
{
"name": "notify-sns",
"resource": "sns",
"filters": [{"TopicArn": topic}],
"actions": [
{
"type": "notify",
"to": ["[email protected]"],
"transport": {"type": "sns", "topic": topic},
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
def test_sns_notify_with_msg_attr(self):
session_factory = self.replay_flight_data("test_sns_notify_action_with_msg_attr")
sqs = session_factory().client('sqs')
sns = session_factory().client('sns')
topic = 'arn:aws:sns:us-east-1:644160558196:test'
policy = {
"name": "notify-sns-with-attr",
"resource": "sns",
"actions": [
{
"type": "notify",
"to": ["[email protected]"],
"transport": {
"type": "sns",
"topic": topic,
"attributes": {"mtype": "test"}
},
}
],
}
self.assertRaises(PolicyValidationError, self.load_policy, policy)
policy['actions'][0]['transport']['attributes'] = {'good-attr': 'value'}
self.assertTrue(self.load_policy(policy, validate=True))
messages = sqs.receive_message(
QueueUrl='https://sqs.us-east-1.amazonaws.com/644160558196/test-queue'
).get('Messages')
self.assertFalse(messages)
subscription = sns.list_subscriptions_by_topic(
TopicArn=topic)['Subscriptions'][0]['Endpoint']
self.assertEqual(subscription, 'arn:aws:sqs:us-east-1:644160558196:test-queue')
self.load_policy(policy, session_factory=session_factory).run()
if self.recording:
time.sleep(20)
message_body = json.loads(sqs.receive_message(
QueueUrl='https://sqs.us-east-1.amazonaws.com/644160558196/test-queue'
).get('Messages')[0]['Body'])
self.assertTrue('mtype' in message_body['MessageAttributes'])
self.assertTrue('good-attr' in message_body['MessageAttributes'])
def test_notify(self):
session_factory = self.replay_flight_data("test_notify_action", zdata=True)
policy = self.load_policy(
{
"name": "instance-check",
"resource": "ec2",
"filters": [{"tag:Testing": "Testing123"}],
"actions": [
{
"type": "notify",
"to": ["[email protected]"],
"transport": {
"type": "sqs",
"queue": (
"https://sqs.us-west-2.amazonaws.com/"
"619193117841/custodian-messages"),
},
}
],
},
session_factory=session_factory,
)
resources = policy.poll()
self.assertJmes('[]."c7n:MatchedFilters"', resources, [["tag:Testing"]])
@functional
def test_notify_region_var(self):
session_factory = self.replay_flight_data("test_notify_region_var")
ec2 = session_factory().resource("ec2")
instance = ec2.create_instances(ImageId="ami-6057e21a", MinCount=1, MaxCount=1)[
0
].id
ec2_client = session_factory().client("ec2")
ec2_client.create_tags(
Resources=[instance], Tags=[{"Key": "k1", "Value": "v1"}]
)
self.addCleanup(ec2_client.terminate_instances, InstanceIds=[instance])
sqs_client = session_factory().client("sqs")
queue_url = sqs_client.create_queue(QueueName="c7n-test-q")["QueueUrl"]
self.addCleanup(sqs_client.delete_queue, QueueUrl=queue_url)
region_format = {"region": "us-east-1"}
if self.recording:
time.sleep(30)
policy = self.load_policy(
{
"name": "instance-check",
"resource": "ec2",
"filters": [{"tag:k1": "v1"}],
"actions": [
{
"type": "notify",
"to": ["[email protected]"],
"transport": {
"type": "sqs",
"queue": "arn:aws:sqs:{region}:123456789012:c7n-test-q",
},
}
],
},
config={"region": "us-east-1"},
session_factory=session_factory,
)
resources = policy.poll()
self.assertJmes('[]."c7n:MatchedFilters"', resources, [["tag:k1"]])
messages = sqs_client.receive_message(
QueueUrl=queue_url.format(**region_format), AttributeNames=["All"]
).get(
"Messages", []
)
self.assertEqual(len(messages), 1)
body = json.loads(zlib.decompress(base64.b64decode(messages[0]["Body"])))
self.assertTrue("tag:k1" in body.get("resources")[0].get("c7n:MatchedFilters"))
|
apache-2.0
|
portnov/sverchok
|
nodes/modifier_make/polygons_adaptative.py
|
3
|
5472
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import FloatProperty
import bmesh
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import (updateNode, Vector_generate,
Vector_degenerate, match_long_repeat)
from sverchok.utils.sv_bmesh_utils import bmesh_from_pydata
# "coauthor": "Alessandro Zomparelli (sketchesofcode)"
class AdaptivePolsNode(bpy.types.Node, SverchCustomTreeNode):
    ''' Spread donor objects adaptively over the polygons of a recipient mesh (matrices are not taken into account, so apply scale/rotation/location with Ctrl+A first) '''
bl_idname = 'AdaptivePolsNode'
bl_label = 'Adaptive Polygons'
bl_icon = 'OUTLINER_OB_EMPTY'
width_coef = FloatProperty(name='width_coef',
                               description='width coefficient for sverchok adaptivepols donors size',
default=1.0, max=3.0, min=0.5,
update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', "VersR", "VersR")
self.inputs.new('StringsSocket', "PolsR", "PolsR")
self.inputs.new('VerticesSocket', "VersD", "VersD")
self.inputs.new('StringsSocket', "PolsD", "PolsD")
self.inputs.new('StringsSocket', "Z_Coef", "Z_Coef")
self.outputs.new('VerticesSocket', "Vertices", "Vertices")
self.outputs.new('StringsSocket', "Poligons", "Poligons")
def draw_buttons(self, context, layout):
layout.prop(self, "width_coef", text="donor width")
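    # The lerp helpers below bilinearly interpolate a donor vertex across the
    # recipient quad (v1, v2, v3, v4); lerp3 additionally offsets the result
    # along the interpolated face normal, which is what drapes the donor mesh
    # over each recipient polygon.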
def lerp(self, v1, v2, v3, v4, v):
v12 = v1 + (v2-v1)*v[0] + ((v2-v1)/2)
v43 = v4 + (v3-v4)*v[0] + ((v3-v4)/2)
return v12 + (v43-v12)*v[1] + ((v43-v12)/2)
def lerp2(self, v1, v2, v3, v4, v, x, y):
v12 = v1 + (v2-v1)*v[0]*x + ((v2-v1)/2)
v43 = v4 + (v3-v4)*v[0]*x + ((v3-v4)/2)
return v12 + (v43-v12)*v[1]*y + ((v43-v12)/2)
def lerp3(self, v1, v2, v3, v4, v, x, y, z):
loc = self.lerp2(v1.co, v2.co, v3.co, v4.co, v, x, y)
nor = self.lerp(v1.normal, v2.normal, v3.normal, v4.normal, v)
nor.normalize()
#print (loc, nor, v[2], z)
return loc + nor*v[2]*z
def process(self):
        # fetch the two main sockets - vertices and polygons
if all(s.is_linked for s in self.inputs[:-1]):
if self.inputs['Z_Coef'].is_linked:
z_coef = self.inputs['Z_Coef'].sv_get()[0]
else:
z_coef = []
polsR = self.inputs['PolsR'].sv_get()[0] # recipient one object [0]
versR = self.inputs['VersR'].sv_get()[0] # recipient
polsD = self.inputs['PolsD'].sv_get() # donor many objects [:]
versD_ = self.inputs['VersD'].sv_get() # donor
versD = Vector_generate(versD_)
polsR, polsD, versD = match_long_repeat([polsR, polsD, versD])
bm = bmesh_from_pydata(versR, [], polsR, normal_update=True)
bm.verts.ensure_lookup_table()
new_ve = bm.verts
vers_out = []
pols_out = []
i = 0
for vD, pR in zip(versD, polsR):
# part of donor to make limits
j = i
pD = polsD[i]
xx = [x[0] for x in vD]
x0 = (self.width_coef) / (max(xx)-min(xx))
yy = [y[1] for y in vD]
y0 = (self.width_coef) / (max(yy)-min(yy))
zz = [z[2] for z in vD]
zzz = (max(zz)-min(zz))
if zzz:
z0 = 1 / zzz
else:
z0 = 0
                # part of recipient polygons to receive the donor
last = len(pR)-1
                vs = [new_ve[v] for v in pR]  # new_ve - temporary data
                if z_coef:
                    if j < len(z_coef):
                        z1 = z0 * z_coef[j]
                    else:
                        z1 = z0
                else:
                    z1 = z0  # no Z coefficients supplied; use the base scale
new_vers = []
new_pols = []
for v in vD:
new_vers.append(self.lerp3(vs[0], vs[1], vs[2], vs[last], v, x0, y0, z1))
for p in pD:
                    new_pols.append(list(p))  # copy so donor polygons stay untouched
pols_out.append(new_pols)
vers_out.append(new_vers)
i += 1
bm.free()
output = Vector_degenerate(vers_out)
self.outputs['Vertices'].sv_set(output)
self.outputs['Poligons'].sv_set(pols_out)
def register():
bpy.utils.register_class(AdaptivePolsNode)
def unregister():
bpy.utils.unregister_class(AdaptivePolsNode)
#if __name__ == '__main__':
# register()
|
gpl-3.0
|
sanjeevtripurari/hue
|
desktop/core/ext-py/pyasn1-0.1.8/test/type/test_constraint.py
|
53
|
8746
|
from pyasn1.type import constraint, error
from pyasn1.error import PyAsn1Error
from sys import version_info
if version_info[0:2] < (2, 7) or \
version_info[0:2] in ( (3, 0), (3, 1) ):
try:
import unittest2 as unittest
except ImportError:
import unittest
else:
import unittest
class SingleValueConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.SingleValueConstraint(1,2)
self.c2 = constraint.SingleValueConstraint(3,4)
    def testCmp(self): assert self.c1 == self.c1, 'comparison fails'
def testHash(self): assert hash(self.c1) != hash(self.c2), 'hash() fails'
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ContainedSubtypeConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ContainedSubtypeConstraint(
constraint.SingleValueConstraint(12)
)
def testGoodVal(self):
try:
self.c1(12)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueRangeConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ValueRangeConstraint(1,4)
def testGoodVal(self):
try:
self.c1(1)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ValueSizeConstraintTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ValueSizeConstraint(1,2)
def testGoodVal(self):
try:
self.c1('a')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('abc')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class PermittedAlphabetConstraintTestCase(SingleValueConstraintTestCase):
def setUp(self):
self.c1 = constraint.PermittedAlphabetConstraint('A', 'B', 'C')
self.c2 = constraint.PermittedAlphabetConstraint('DEF')
def testGoodVal(self):
try:
self.c1('A')
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1('E')
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsIntersectionTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4)
)
def testCmp1(self):
assert constraint.SingleValueConstraint(4) in self.c1, '__cmp__() fails'
def testCmp2(self):
assert constraint.SingleValueConstraint(5) not in self.c1, \
'__cmp__() fails'
def testCmp3(self):
c = constraint.ConstraintsUnion(constraint.ConstraintsIntersection(
constraint.SingleValueConstraint(4),
constraint.ValueRangeConstraint(2, 4)
))
assert self.c1 in c, '__cmp__() fails'
def testCmp4(self):
c = constraint.ConstraintsUnion(
constraint.ConstraintsIntersection(constraint.SingleValueConstraint(5))
)
assert self.c1 not in c, '__cmp__() fails'
def testGoodVal(self):
try:
self.c1(4)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class InnerTypeConstraintTestCase(unittest.TestCase):
def testConst1(self):
c = constraint.InnerTypeConstraint(
constraint.SingleValueConstraint(4)
)
try:
c(4, 32)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
try:
c(5, 32)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
def testConst2(self):
c = constraint.InnerTypeConstraint(
(0, constraint.SingleValueConstraint(4), 'PRESENT'),
(1, constraint.SingleValueConstraint(4), 'ABSENT')
)
try:
c(4, 0)
except error.ValueConstraintError:
            assert 0, 'constraint check fails'
try:
c(4, 1)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
try:
c(3, 0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints compositions
class ConstraintsIntersectionTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 9),
constraint.ValueRangeConstraint(2, 5)
)
def testGoodVal(self):
try:
self.c1(3)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(0)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsUnionTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsUnion(
constraint.SingleValueConstraint(5),
constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
try:
self.c1(2)
self.c1(5)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(-5)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
class ConstraintsExclusionTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsExclusion(
constraint.ValueRangeConstraint(2, 4)
)
def testGoodVal(self):
try:
self.c1(6)
except error.ValueConstraintError:
assert 0, 'constraint check fails'
def testBadVal(self):
try:
self.c1(2)
except error.ValueConstraintError:
pass
else:
assert 0, 'constraint check fails'
# Constraints derivations
class DirectDerivationTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.SingleValueConstraint(5)
self.c2 = constraint.ConstraintsUnion(
self.c1, constraint.ValueRangeConstraint(1, 3)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2) , 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1) , 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1) , 'isSubTypeOf failed'
class IndirectDerivationTestCase(unittest.TestCase):
def setUp(self):
self.c1 = constraint.ConstraintsIntersection(
constraint.ValueRangeConstraint(1, 30)
)
self.c2 = constraint.ConstraintsIntersection(
self.c1, constraint.ValueRangeConstraint(1, 20)
)
self.c2 = constraint.ConstraintsIntersection(
self.c2, constraint.ValueRangeConstraint(1, 10)
)
def testGoodVal(self):
assert self.c1.isSuperTypeOf(self.c2), 'isSuperTypeOf failed'
assert not self.c1.isSubTypeOf(self.c2) , 'isSubTypeOf failed'
def testBadVal(self):
assert not self.c2.isSuperTypeOf(self.c1) , 'isSuperTypeOf failed'
assert self.c2.isSubTypeOf(self.c1) , 'isSubTypeOf failed'
if __name__ == '__main__': unittest.main()
# how to apply size constraints to constructed types?
|
apache-2.0
|
UOMx/edx-platform
|
common/lib/xmodule/xmodule/poll_module.py
|
146
|
7498
|
"""Poll module is ungraded xmodule used by students to
to do set of polls.
On the client side we show:
If student does not yet anwered - Question with set of choices.
If student have answered - Question with statistics for each answers.
"""
import cgi
import json
import logging
from copy import deepcopy
from collections import OrderedDict
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.stringify import stringify_children
from xmodule.mako_module import MakoModuleDescriptor
from xmodule.xml_module import XmlDescriptor
from xblock.fields import Scope, String, Dict, Boolean, List
log = logging.getLogger(__name__)
class PollFields(object):
# Name of poll to use in links to this poll
display_name = String(help="Display name for this module", scope=Scope.settings)
voted = Boolean(help="Whether this student has voted on the poll", scope=Scope.user_state, default=False)
poll_answer = String(help="Student answer", scope=Scope.user_state, default='')
poll_answers = Dict(help="Poll answers from all students", scope=Scope.user_state_summary)
# List of answers, in the form {'id': 'some id', 'text': 'the answer text'}
answers = List(help="Poll answers from xml", scope=Scope.content, default=[])
question = String(help="Poll question", scope=Scope.content, default='')
class PollModule(PollFields, XModule):
"""Poll Module"""
js = {
'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
'js': [
resource_string(__name__, 'js/src/poll/poll.js'),
resource_string(__name__, 'js/src/poll/poll_main.js')
]
}
css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]}
js_module_name = "Poll"
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request data parameters
Returns:
json string
"""
if dispatch in self.poll_answers and not self.voted:
            # FIXME: fix this when xblock supports mutable types.
            # For now we use this hack.
temp_poll_answers = self.poll_answers
temp_poll_answers[dispatch] += 1
self.poll_answers = temp_poll_answers
self.voted = True
self.poll_answer = dispatch
return json.dumps({'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values()),
'callback': {'objectName': 'Conditional'}
})
elif dispatch == 'get_state':
return json.dumps({'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers,
'total': sum(self.poll_answers.values())
})
elif dispatch == 'reset_poll' and self.voted and \
self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false':
self.voted = False
            # FIXME: fix this when xblock supports mutable types.
            # For now we use this hack.
temp_poll_answers = self.poll_answers
temp_poll_answers[self.poll_answer] -= 1
self.poll_answers = temp_poll_answers
self.poll_answer = ''
return json.dumps({'status': 'success'})
else: # return error message
return json.dumps({'error': 'Unknown Command!'})
def get_html(self):
"""Renders parameters to template."""
params = {
'element_id': self.location.html_id(),
'element_class': self.location.category,
'ajax_url': self.system.ajax_url,
'configuration_json': self.dump_poll(),
}
self.content = self.system.render_template('poll.html', params)
return self.content
def dump_poll(self):
"""Dump poll information.
Returns:
string - Serialize json.
"""
        # FIXME: hack to work around caching of `default={}` during definition
        # of the poll_answers field
if self.poll_answers is None:
self.poll_answers = {}
answers_to_json = OrderedDict()
        # FIXME: fix this when xblock supports mutable types.
        # For now we use this hack.
temp_poll_answers = self.poll_answers
# Fill self.poll_answers, prepare data for template context.
for answer in self.answers:
# Set default count for answer = 0.
if answer['id'] not in temp_poll_answers:
temp_poll_answers[answer['id']] = 0
answers_to_json[answer['id']] = cgi.escape(answer['text'])
self.poll_answers = temp_poll_answers
return json.dumps({
'answers': answers_to_json,
'question': cgi.escape(self.question),
# to show answered poll after reload:
'poll_answer': self.poll_answer,
'poll_answers': self.poll_answers if self.voted else {},
'total': sum(self.poll_answers.values()) if self.voted else 0,
'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()
})
class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor):
_tag_name = 'poll_question'
_child_tag_name = 'answer'
module_class = PollModule
@classmethod
def definition_from_xml(cls, xml_object, system):
"""Pull out the data into dictionary.
Args:
xml_object: xml from file.
system: `system` object.
Returns:
(definition, children) - tuple
definition - dict:
{
'answers': <List of answers>,
'question': <Question string>
}
"""
        # Check for presence of required tags in xml.
if len(xml_object.xpath(cls._child_tag_name)) == 0:
raise ValueError("Poll_question definition must include \
at least one 'answer' tag")
xml_object_copy = deepcopy(xml_object)
answers = []
for element_answer in xml_object_copy.findall(cls._child_tag_name):
answer_id = element_answer.get('id', None)
if answer_id:
answers.append({
'id': answer_id,
'text': stringify_children(element_answer)
})
xml_object_copy.remove(element_answer)
definition = {
'answers': answers,
'question': stringify_children(xml_object_copy)
}
children = []
return (definition, children)
def definition_to_xml(self, resource_fs):
"""Return an xml element representing to this definition."""
poll_str = u'<{tag_name}>{text}</{tag_name}>'.format(
tag_name=self._tag_name, text=self.question)
xml_object = etree.fromstring(poll_str)
xml_object.set('display_name', self.display_name)
def add_child(xml_obj, answer):
child_str = u'<{tag_name} id="{id}">{text}</{tag_name}>'.format(
tag_name=self._child_tag_name, id=answer['id'],
text=answer['text'])
child_node = etree.fromstring(child_str)
xml_object.append(child_node)
for answer in self.answers:
add_child(xml_object, answer)
return xml_object
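# For illustration, the XML handled by the descriptor above looks roughly like
# this (tag names come from _tag_name/_child_tag_name; the ids and text here
# are made up):
#
#     <poll_question display_name="My Poll">Do you like polls?
#         <answer id="yes">Yes</answer>
#         <answer id="no">No</answer>
#     </poll_question>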
|
agpl-3.0
|
2014c2g2/2015cdag2_test
|
static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py
|
733
|
66279
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names; these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
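# Example (added, illustrative): _levelNames maps both ways in this module,
# so lookups work in either direction; unknown levels fall back to "Level %s".
#   >>> getLevelName(20)
#   'INFO'
#   >>> getLevelName('INFO')
#   20
#   >>> getLevelName(25)
#   'Level 25'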
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary,
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
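# Example (added, illustrative): round-tripping a record through a dict, as a
# socket-based receiver of logging events would do.
#   >>> rec = LogRecord('demo', INFO, __file__, 1, 'hello %s', ('world',), None)
#   >>> clone = makeLogRecord(rec.__dict__)
#   >>> clone.getMessage()
#   'hello world'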
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
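# Example (added, illustrative): the same format string in each style.
#   '%' : '%(levelname)s:%(name)s:%(message)s'
#   '{' : '{levelname}:{name}:{message}'
#   '$' : '${levelname}:${name}:${message}'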
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%s(message)" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
        .. versionchanged:: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
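    # Example (added, illustrative): render all times in UTC by swapping the
    # converter, as the docstring above describes.
    #   Formatter.converter = time.gmtime     # affects every formatter
    #   fmt = Formatter()
    #   fmt.converter = time.gmtime           # affects just this instance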
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
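# Example (added, illustrative): a filter named "A.B" passes records from
# "A.B" and its descendants only.
#   >>> f = Filter('A.B')
#   >>> f.filter(makeLogRecord({'name': 'A.B.C'}))
#   True
#   >>> f.filter(makeLogRecord({'name': 'A.BB'}))
#   False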
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
        .. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
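# Example (added, illustrative): since 3.2 a plain callable can act as a
# filter, e.g.
#   handler.addFilter(lambda record: record.levelno >= ERROR)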
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
        #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
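    # Example (added, hypothetical subclass): prefix every message with a
    # connection id taken from the adapter's 'extra' dict.
    #   class ConnAdapter(LoggerAdapter):
    #       def process(self, msg, kwargs):
    #           return '[%s] %s' % (self.extra['connid'], msg), kwargs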
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
        ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
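# Example (added, illustrative) of one-shot configuration; 'app.log' is a
# hypothetical filename.
#   basicConfig(filename='app.log', filemode='w', level=DEBUG,
#               format='%(asctime)s %(levelname)s %(message)s')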
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarnings which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
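# Example (added, illustrative): route warnings.warn() output through logging.
#   captureWarnings(True)    # warnings go to the "py.warnings" logger
#   captureWarnings(False)   # restore the original warnings behaviour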
|
gpl-3.0
|
nalyd88/Algorithms
|
DP/LogCutting/LogCutting.py
|
1
|
2100
|
# Practicing Dynamic Programming (DP) with the log cutting problem given on the practice midterm.
import numpy as np
################################################################################
# Slow Algorithm
################################################################################
def cut_log(d, j, k):
if j+1 == k:
return 0
c = float('Inf')
for i in range(j+1, k):
c = min(c, d[k] + cut_log(d, j, i) + cut_log(d - d[i], i, k))
return c
################################################################################
# Top Down Algorithm
################################################################################
def memoized_cut_log(d):
k = len(d)
j = 0
r = np.ones([k, k])*np.inf
v = memoized_cut_log_aux(d, j, k-1, r)
return v
def memoized_cut_log_aux(d, j, k, r):
if r[j, k] < np.inf:
return r[j, k]
if j+1 == k:
r[j, k] = 0
else:
c = float('Inf')
for i in range(j+1, k):
c = min(c, d[k] + memoized_cut_log_aux(d, j, i, r) + memoized_cut_log_aux(d - d[i], i, k, r))
r[j, k] = c
return r[j, k]
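# Note (added commentary): assuming d[0] == 0 as in the example below, the
# recursion keeps the invariant d[j] == 0 by passing the shifted array
# d - d[i] to the right subproblem, so the cost of the piece spanning (j, k)
# is simply d[k] and the memo table r can safely be keyed by (j, k) alone.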
################################################################################
# Bottom Up Algorithm
################################################################################
def bottom_up_cut_log(d):
    # Interval DP: r[j, i] holds the optimal cost of fully cutting the piece
    # spanning cut positions j..i; the cost of one cut is the piece's length.
    k = len(d)
    r = np.zeros([k, k])
    for length in range(2, k):
        for j in range(0, k - length):
            i = j + length
            c = np.inf
            for m in range(j + 1, i):
                c = min(c, d[i] - d[j] + r[j, m] + r[m, i])
            r[j, i] = c
    return r[0, k - 1]
################################################################################
# Main
################################################################################
if __name__ == '__main__':
dist = np.array([0, 3, 8, 10])
print("min cost (slow) = $" + str(cut_log(dist, 0, 3)))
print("min cost (top down) = $" + str(memoized_cut_log(dist)))
# print("min cost (slow) = $" + str(bottom_up_cut_log(dist)))
|
mit
|
blublud/networkx
|
networkx/algorithms/components/tests/test_semiconnected.py
|
64
|
1901
|
from itertools import chain
import networkx as nx
from nose.tools import *
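# A digraph is semiconnected if, for every pair of nodes u and v, there is a
# path from u to v or from v to u (added commentary, not in the original).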
class TestIsSemiconnected(object):
def test_undirected(self):
assert_raises(nx.NetworkXNotImplemented, nx.is_semiconnected,
nx.Graph())
assert_raises(nx.NetworkXNotImplemented, nx.is_semiconnected,
nx.MultiGraph())
def test_empty(self):
assert_raises(nx.NetworkXPointlessConcept, nx.is_semiconnected,
nx.DiGraph())
assert_raises(nx.NetworkXPointlessConcept, nx.is_semiconnected,
nx.MultiDiGraph())
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
ok_(nx.is_semiconnected(G))
def test_path(self):
G = nx.path_graph(100, create_using=nx.DiGraph())
ok_(nx.is_semiconnected(G))
G.add_edge(100, 99)
ok_(not nx.is_semiconnected(G))
def test_cycle(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
ok_(nx.is_semiconnected(G))
G = nx.path_graph(100, create_using=nx.DiGraph())
G.add_edge(0, 99)
ok_(nx.is_semiconnected(G))
def test_tree(self):
G = nx.DiGraph()
G.add_edges_from(chain.from_iterable([(i, 2 * i + 1), (i, 2 * i + 2)]
for i in range(100)))
ok_(not nx.is_semiconnected(G))
def test_dumbbell(self):
G = nx.cycle_graph(100, create_using=nx.DiGraph())
G.add_edges_from((i + 100, (i + 1) % 100 + 100) for i in range(100))
ok_(not nx.is_semiconnected(G)) # G is disconnected.
G.add_edge(100, 99)
ok_(nx.is_semiconnected(G))
def test_alternating_path(self):
G = nx.DiGraph(chain.from_iterable([(i, i - 1), (i, i + 1)]
for i in range(0, 100, 2)))
ok_(not nx.is_semiconnected(G))
|
bsd-3-clause
|
psyhofreak/ft-engine
|
scripts/sql.bench.summary.py
|
16
|
2076
|
#!/usr/bin/env python
# summarize the sql-bench trace file
import sys
import re
import os.path
class testreports:
def __init__(self):
self.reports = []
def append(self, report):
self.reports.append(report)
def duration(self, start, stop):
t0 = os.popen('date -d"' + start + '" +%s').readline()
t1 = os.popen('date -d"' + stop + '" +%s').readline()
return int(t1) - int(t0)
def printit(self, i):
report = self.reports[i]
d = self.duration(report["start"], report["stop"])
print "%s %s %6u %s" % (report["result"].upper(), report["start"], d, report["name"])
# print self.reports[i]
def printall(self):
for i in range(len(self.reports)):
self.printit(i)
def stoptime(self, stoptime):
if len(self.reports) > 0:
lastreport = self.reports[-1]
lastreport["stop"] = stoptime
def main():
    reports = testreports()
    testreport = {}
    totaltime = ""
    while 1:
        b = sys.stdin.readline()
        if b == "": break
        b = b.rstrip('\n')
        match = re.match(r"^(\d{8} \d{2}:\d{2}:\d{2})$", b)
        if match:
            if testreport:
                # a run with no total-time line before its closing timestamp died
                if totaltime == "" and testreport["result"] == "pass":
                    testreport["result"] = "fail"
                testreport["stop"] = match.group(1)
                reports.append(testreport)
                testreport = {}
            continue
match = re.match("^(\d{8} \d{2}:\d{2}:\d{2}) (test-.*)$", b)
if match:
testreport["start"] = match.group(1)
testreport["name"] = match.group(2)
testreport["result"] = "pass"
totaltime = ""
continue
match = re.match(".*Got error|.*Died at", b)
if match: testreport["result"] = "fail"
match = re.match("^Total time|^Estimated total time", b)
if match: totaltime = b
match = re.match("skip", b)
if match: testreport["result"] = "skip"
reports.printall()
return 0
if __name__ == '__main__':
    sys.exit(main())
|
gpl-2.0
|
konstruktoid/ansible-upstream
|
lib/ansible/plugins/terminal/enos.py
|
101
|
2824
|
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains terminal Plugin methods for ENOS Config Module
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
re.compile(br">[\r\n]?")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
]
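    # The stdout patterns above are meant to match ENOS CLI prompts (for
    # example "switch>" or "switch(config)#"); the stderr patterns flag the
    # usual CLI error strings so command output can be checked for failure.
    # The concrete prompt examples are illustrative, not taken from a device.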
def on_open_shell(self):
try:
for cmd in (b'\n', b'terminal-length 0\n'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text
# on both py2 and py3.
cmd[u'prompt'] = to_text(r"[\r\n]?password: $",
errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd),
errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
msg = 'unable to elevate privilege to enable mode'
raise AnsibleConnectionFailure(msg)
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b'(config' in prompt:
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
|
gpl-3.0
|
shinpeimuraoka/ryu
|
ryu/tests/unit/ofproto/test_parser_v13.py
|
27
|
4999
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
import six
import socket
from struct import *
from nose.tools import *
from ryu.ofproto.ofproto_v1_3_parser import *
from ryu.ofproto import ofproto_v1_3_parser
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_protocol
from ryu.ofproto import ether
from ryu.ofproto.ofproto_parser import MsgBase
from ryu import utils
from ryu.lib import addrconv
LOG = logging.getLogger('test_ofproto_v13')
_Datapath = ofproto_protocol.ProtocolDesc(version=ofproto_v1_3.OFP_VERSION)
class TestOFPMatch(unittest.TestCase):
""" Test case for ofproto_v1_3_parser.OFPMatch
"""
def test_init(self):
res = OFPMatch()
# wc check
eq_(res._wc.vlan_vid_mask, 0)
# flow check
eq_(res._flow.vlan_vid, 0)
def _test_serialize_and_parser(self, match, header, value, mask=None):
cls_ = OFPMatchField._FIELDS_HEADERS.get(header)
pack_str = cls_.pack_str.replace('!', '')
fmt = '!HHI' + pack_str
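        # '!HHI' covers the OFPMatch type and length fields plus the 4-byte
        # OXM TLV header that precedes each field; pack_str then describes
        # the field value itself.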
# serialize
buf = bytearray()
length = match.serialize(buf, 0)
eq_(length, len(buf))
if mask and len(buf) > calcsize(fmt):
fmt += pack_str
res = list(unpack_from(fmt, six.binary_type(buf), 0)[3:])
if type(value) is list:
res_value = res[:calcsize(pack_str) // 2]
eq_(res_value, value)
if mask:
res_mask = res[calcsize(pack_str) // 2:]
eq_(res_mask, mask)
else:
res_value = res.pop(0)
if cls_.__name__ == 'MTVlanVid':
eq_(res_value, value | ofproto.OFPVID_PRESENT)
else:
eq_(res_value, value)
if mask and res and res[0]:
res_mask = res[0]
eq_(res_mask, mask)
# parser
res = match.parser(six.binary_type(buf), 0)
eq_(res.type, ofproto.OFPMT_OXM)
eq_(res.fields[0].header, header)
eq_(res.fields[0].value, value)
if mask and res.fields[0].mask is not None:
eq_(res.fields[0].mask, mask)
# to_jsondict
jsondict = match.to_jsondict()
# from_jsondict
match2 = match.from_jsondict(jsondict["OFPMatch"])
buf2 = bytearray()
match2.serialize(buf2, 0)
eq_(str(match), str(match2))
eq_(buf, buf2)
# set_vlan_vid
def _test_set_vlan_vid(self, vid, mask=None):
header = ofproto.OXM_OF_VLAN_VID
match = OFPMatch()
if mask is None:
match.set_vlan_vid(vid)
else:
header = ofproto.OXM_OF_VLAN_VID_W
match.set_vlan_vid_masked(vid, mask)
self._test_serialize_and_parser(match, header, vid, mask)
def _test_set_vlan_vid_none(self):
header = ofproto.OXM_OF_VLAN_VID
match = OFPMatch()
match.set_vlan_vid_none()
value = ofproto.OFPVID_NONE
cls_ = OFPMatchField._FIELDS_HEADERS.get(header)
pack_str = cls_.pack_str.replace('!', '')
fmt = '!HHI' + pack_str
# serialize
buf = bytearray()
length = match.serialize(buf, 0)
eq_(length, len(buf))
res = list(unpack_from(fmt, six.binary_type(buf), 0)[3:])
res_value = res.pop(0)
eq_(res_value, value)
# parser
res = match.parser(six.binary_type(buf), 0)
eq_(res.type, ofproto.OFPMT_OXM)
eq_(res.fields[0].header, header)
eq_(res.fields[0].value, value)
# to_jsondict
jsondict = match.to_jsondict()
# from_jsondict
match2 = match.from_jsondict(jsondict["OFPMatch"])
buf2 = bytearray()
match2.serialize(buf2, 0)
eq_(str(match), str(match2))
eq_(buf, buf2)
def test_set_vlan_vid_mid(self):
self._test_set_vlan_vid(2047)
def test_set_vlan_vid_max(self):
self._test_set_vlan_vid(0xfff)
def test_set_vlan_vid_min(self):
self._test_set_vlan_vid(0)
def test_set_vlan_vid_masked_mid(self):
self._test_set_vlan_vid(2047, 0xf0f)
def test_set_vlan_vid_masked_max(self):
self._test_set_vlan_vid(2047, 0xfff)
def test_set_vlan_vid_masked_min(self):
self._test_set_vlan_vid(2047, 0)
def test_set_vlan_vid_none(self):
self._test_set_vlan_vid_none()
|
apache-2.0
|
guoci/python3-xlib-trunk
|
Xlib/ext/record.py
|
1
|
9344
|
# Xlib.ext.record -- RECORD extension module
#
# Copyright (C) 2006 Alex Badea <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from Xlib import X
from Xlib.protocol import rq
extname = 'RECORD'
FromServerTime = 0x01
FromClientTime = 0x02
FromClientSequence = 0x04
CurrentClients = 1
FutureClients = 2
AllClients = 3
FromServer = 0
FromClient = 1
ClientStarted = 2
ClientDied = 3
StartOfData = 4
EndOfData = 5
Record_Range8 = rq.Struct(
rq.Card8('first'),
rq.Card8('last'))
Record_Range16 = rq.Struct(
rq.Card16('first'),
rq.Card16('last'))
Record_ExtRange = rq.Struct(
rq.Object('major_range', Record_Range8),
rq.Object('minor_range', Record_Range16))
Record_Range = rq.Struct(
rq.Object('core_requests', Record_Range8),
rq.Object('core_replies', Record_Range8),
rq.Object('ext_requests', Record_ExtRange),
rq.Object('ext_replies', Record_ExtRange),
rq.Object('delivered_events', Record_Range8),
rq.Object('device_events', Record_Range8),
rq.Object('errors', Record_Range8),
rq.Bool('client_started'),
rq.Bool('client_died'))
Record_ClientInfo = rq.Struct(
rq.Card32('client_resource'),
rq.LengthOf('ranges', 4),
rq.List('ranges', Record_Range))
class RawField(rq.ValueField):
"""A field with raw data, stored as a string"""
structcode = None
def pack_value(self, val):
return val, len(val), None
def parse_binary_value(self, data, display, length, format):
return data, ''
class GetVersion(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(0),
rq.RequestLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'))
_reply = rq.Struct(
rq.Pad(2),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
rq.Pad(20))
def get_version(self, major, minor):
return GetVersion(
display = self.display,
opcode = self.display.get_extension_major(extname),
major_version = major,
minor_version = minor)
class CreateContext(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(1),
rq.RequestLength(),
rq.Card32('context'), # Record_RC
rq.Card8('element_header'), # Record_Element_Header
rq.Pad(3),
rq.LengthOf('clients', 4),
rq.LengthOf('ranges', 4),
rq.List('clients', rq.Card32Obj),
rq.List('ranges', Record_Range))
def create_context(self, datum_flags, clients, ranges):
context = self.display.allocate_resource_id()
CreateContext(
display = self.display,
opcode = self.display.get_extension_major(extname),
context = context,
element_header = datum_flags,
clients = clients,
ranges = ranges)
return context
class RegisterClients(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(2),
rq.RequestLength(),
rq.Card32('context'), # Record_RC
rq.Card8('element_header'), # Record_Element_Header
rq.Pad(3),
rq.LengthOf('clients', 4),
rq.LengthOf('ranges', 4),
rq.List('clients', rq.Card32Obj),
rq.List('ranges', Record_Range))
def register_clients(self, context, element_header, clients, ranges):
RegisterClients(
display = self.display,
opcode = self.display.get_extension_major(extname),
context = context,
element_header = element_header,
clients = clients,
ranges = ranges)
class UnregisterClients(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(3),
rq.RequestLength(),
rq.Card32('context'), # Record_RC
rq.LengthOf('clients', 4),
rq.List('clients', rq.Card32Obj))
def unregister_clients(self, context, clients):
UnregisterClients(
display = self.display,
opcode = self.display.get_extension_major(extname),
context = context,
clients = clients)
class GetContext(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(4),
rq.RequestLength(),
rq.Card32('context')) # Record_RC
_reply = rq.Struct(
rq.Pad(2),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card8('element_header'), # Record_Element_Header
rq.Pad(3),
rq.LengthOf('client_info', 4),
rq.Pad(16),
rq.List('client_info', Record_ClientInfo))
def get_context(self, context):
return GetContext(
display = self.display,
opcode = self.display.get_extension_major(extname),
context = context)
class EnableContext(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(5),
rq.RequestLength(),
rq.Card32('context')) # Record_RC
_reply = rq.Struct(
rq.Pad(1),
rq.Card8('category'),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card8('element_header'), # Record_Element_Header
rq.Bool('client_swapped'),
rq.Pad(2),
rq.Card32('id_base'), # Record_XIDBase
rq.Card32('server_time'),
rq.Card32('recorded_sequence_number'),
rq.Pad(8),
RawField('data'))
# This request receives multiple responses, so we need to keep
# ourselves in the 'sent_requests' list in order to receive them all.
    # See the discussion on ListFontsWithInfo in request.py
def __init__(self, callback, *args, **keys):
self._callback = callback
rq.ReplyRequest.__init__(*(self, ) + args, **keys)
def _parse_response(self, data):
r, d = self._reply.parse_binary(data, self._display)
self._callback(r)
if r.category == StartOfData:
# Hack ourselves a sequence number, used by the code in
# Xlib.protocol.display.Display.parse_request_response()
self.sequence_number = r.sequence_number
if r.category == EndOfData:
self._response_lock.acquire()
self._data = r
self._response_lock.release()
else:
self._display.sent_requests.insert(0, self)
def enable_context(self, context, callback):
EnableContext(
callback = callback,
display = self.display,
opcode = self.display.get_extension_major(extname),
context = context)
class DisableContext(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(6),
rq.RequestLength(),
rq.Card32('context')) # Record_RC
def disable_context(self, context):
DisableContext(
display = self.display,
opcode = self.display.get_extension_major(extname),
context = context)
class FreeContext(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(7),
rq.RequestLength(),
rq.Card32('context')) # Record_RC
def free_context(self, context):
FreeContext(
display = self.display,
opcode = self.display.get_extension_major(extname),
context = context)
self.display.free_resource_id(context)
def init(disp, info):
disp.extension_add_method('display', 'record_get_version', get_version)
disp.extension_add_method('display', 'record_create_context', create_context)
disp.extension_add_method('display', 'record_register_clients', register_clients)
disp.extension_add_method('display', 'record_unregister_clients', unregister_clients)
disp.extension_add_method('display', 'record_get_context', get_context)
disp.extension_add_method('display', 'record_enable_context', enable_context)
disp.extension_add_method('display', 'record_disable_context', disable_context)
disp.extension_add_method('display', 'record_free_context', free_context)
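# A minimal usage sketch (not part of this module): with a dedicated second
# connection `record_dpy` to a server that supports RECORD, a context is
# created, enabled with a callback, and freed when done. The range values
# shown (device_events (2, 6) covers KeyPress through MotionNotify) are just
# one conventional choice.
#
#   ctx = record_dpy.record_create_context(
#       0,                                  # element_header / datum flags
#       [AllClients],
#       [{'core_requests': (0, 0), 'core_replies': (0, 0),
#         'ext_requests': (0, 0, 0, 0), 'ext_replies': (0, 0, 0, 0),
#         'delivered_events': (0, 0), 'device_events': (2, 6),
#         'errors': (0, 0), 'client_started': False, 'client_died': False}])
#   record_dpy.record_enable_context(ctx, callback)  # blocks, invoking callback
#   record_dpy.record_free_context(ctx)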
|
gpl-2.0
|
yuifan/pexus4_external_chromium-trace
|
trace-viewer/third_party/pywebsocket/src/mod_pywebsocket/extensions.py
|
29
|
23947
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket.http_header_util import quote_if_necessary
_available_processors = {}
class ExtensionProcessorInterface(object):
def name(self):
return None
def get_extension_response(self):
return None
def setup_stream_options(self, stream_options):
pass
class DeflateStreamExtensionProcessor(ExtensionProcessorInterface):
"""WebSocket DEFLATE stream extension processor."""
def __init__(self, request):
self._logger = util.get_class_logger(self)
self._request = request
def name(self):
return common.DEFLATE_STREAM_EXTENSION
def get_extension_response(self):
if len(self._request.get_parameter_names()) != 0:
return None
self._logger.debug(
'Enable %s extension', common.DEFLATE_STREAM_EXTENSION)
return common.ExtensionParameter(common.DEFLATE_STREAM_EXTENSION)
def setup_stream_options(self, stream_options):
stream_options.deflate_stream = True
_available_processors[common.DEFLATE_STREAM_EXTENSION] = (
DeflateStreamExtensionProcessor)
def _log_compression_ratio(logger, original_bytes, total_original_bytes,
filtered_bytes, total_filtered_bytes):
# Print inf when ratio is not available.
ratio = float('inf')
average_ratio = float('inf')
if original_bytes != 0:
ratio = float(filtered_bytes) / original_bytes
if total_original_bytes != 0:
average_ratio = (
float(total_filtered_bytes) / total_original_bytes)
logger.debug('Outgoing compress ratio: %f (average: %f)' %
(ratio, average_ratio))
def _log_decompression_ratio(logger, received_bytes, total_received_bytes,
filtered_bytes, total_filtered_bytes):
# Print inf when ratio is not available.
ratio = float('inf')
average_ratio = float('inf')
    if filtered_bytes != 0:
        ratio = float(received_bytes) / filtered_bytes
if total_filtered_bytes != 0:
average_ratio = (
float(total_received_bytes) / total_filtered_bytes)
logger.debug('Incoming compress ratio: %f (average: %f)' %
(ratio, average_ratio))
class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
"""WebSocket Per-frame DEFLATE extension processor."""
_WINDOW_BITS_PARAM = 'max_window_bits'
_NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
def __init__(self, request):
self._logger = util.get_class_logger(self)
self._request = request
self._response_window_bits = None
self._response_no_context_takeover = False
# Counters for statistics.
# Total number of outgoing bytes supplied to this filter.
self._total_outgoing_payload_bytes = 0
# Total number of bytes sent to the network after applying this filter.
self._total_filtered_outgoing_payload_bytes = 0
# Total number of bytes received from the network.
self._total_incoming_payload_bytes = 0
# Total number of incoming bytes obtained after applying this filter.
self._total_filtered_incoming_payload_bytes = 0
def name(self):
return common.DEFLATE_FRAME_EXTENSION
def get_extension_response(self):
# Any unknown parameter will be just ignored.
window_bits = self._request.get_parameter_value(
self._WINDOW_BITS_PARAM)
no_context_takeover = self._request.has_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM)
if (no_context_takeover and
self._request.get_parameter_value(
self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
return None
if window_bits is not None:
try:
window_bits = int(window_bits)
except ValueError, e:
return None
if window_bits < 8 or window_bits > 15:
return None
self._deflater = util._RFC1979Deflater(
window_bits, no_context_takeover)
self._inflater = util._RFC1979Inflater()
self._compress_outgoing = True
response = common.ExtensionParameter(self._request.name())
if self._response_window_bits is not None:
response.add_parameter(
self._WINDOW_BITS_PARAM, str(self._response_window_bits))
if self._response_no_context_takeover:
response.add_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: window_bits=%s; no_context_takeover=%r, '
            'response: window_bits=%s; no_context_takeover=%r)' %
(self._request.name(),
window_bits,
no_context_takeover,
self._response_window_bits,
self._response_no_context_takeover))
return response
def setup_stream_options(self, stream_options):
class _OutgoingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._outgoing_filter(frame)
class _IncomingFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._incoming_filter(frame)
stream_options.outgoing_frame_filters.append(
_OutgoingFilter(self))
stream_options.incoming_frame_filters.insert(
0, _IncomingFilter(self))
def set_response_window_bits(self, value):
self._response_window_bits = value
def set_response_no_context_takeover(self, value):
self._response_no_context_takeover = value
def enable_outgoing_compression(self):
self._compress_outgoing = True
def disable_outgoing_compression(self):
self._compress_outgoing = False
def _outgoing_filter(self, frame):
"""Transform outgoing frames. This method is called only by
an _OutgoingFilter instance.
"""
original_payload_size = len(frame.payload)
self._total_outgoing_payload_bytes += original_payload_size
if (not self._compress_outgoing or
common.is_control_opcode(frame.opcode)):
self._total_filtered_outgoing_payload_bytes += (
original_payload_size)
return
frame.payload = self._deflater.filter(frame.payload)
frame.rsv1 = 1
filtered_payload_size = len(frame.payload)
self._total_filtered_outgoing_payload_bytes += filtered_payload_size
_log_compression_ratio(self._logger, original_payload_size,
self._total_outgoing_payload_bytes,
filtered_payload_size,
self._total_filtered_outgoing_payload_bytes)
def _incoming_filter(self, frame):
"""Transform incoming frames. This method is called only by
an _IncomingFilter instance.
"""
received_payload_size = len(frame.payload)
self._total_incoming_payload_bytes += received_payload_size
if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
self._total_filtered_incoming_payload_bytes += (
received_payload_size)
return
frame.payload = self._inflater.filter(frame.payload)
frame.rsv1 = 0
filtered_payload_size = len(frame.payload)
self._total_filtered_incoming_payload_bytes += filtered_payload_size
_log_decompression_ratio(self._logger, received_payload_size,
self._total_incoming_payload_bytes,
filtered_payload_size,
self._total_filtered_incoming_payload_bytes)
_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
# Adding vendor-prefixed deflate-frame extension.
# TODO(bashi): Remove this after WebKit stops using vendor prefix.
_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
def _parse_compression_method(data):
"""Parses the value of "method" extension parameter."""
return common.parse_extensions(data, allow_quoted_string=True)
def _create_accepted_method_desc(method_name, method_params):
"""Creates accepted-method-desc from given method name and parameters"""
extension = common.ExtensionParameter(method_name)
for name, value in method_params:
extension.add_parameter(name, value)
return common.format_extension(extension)
class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
"""Base class for Per-frame and Per-message compression extension."""
_METHOD_PARAM = 'method'
def __init__(self, request):
self._logger = util.get_class_logger(self)
self._request = request
self._compression_method_name = None
self._compression_processor = None
def name(self):
return ''
def _lookup_compression_processor(self, method_desc):
return None
def _get_compression_processor_response(self):
"""Looks up the compression processor based on the self._request and
returns the compression processor's response.
"""
method_list = self._request.get_parameter_value(self._METHOD_PARAM)
if method_list is None:
return None
methods = _parse_compression_method(method_list)
if methods is None:
return None
        compression_processor = None
        # The current implementation tries only the first method that matches
        # a supported algorithm. The remaining methods aren't tried even if
        # the first one is rejected.
        # TODO(bashi): Need to clarify this behavior.
for method_desc in methods:
compression_processor = self._lookup_compression_processor(
method_desc)
if compression_processor is not None:
self._compression_method_name = method_desc.name()
break
if compression_processor is None:
return None
processor_response = compression_processor.get_extension_response()
if processor_response is None:
return None
self._compression_processor = compression_processor
return processor_response
def get_extension_response(self):
processor_response = self._get_compression_processor_response()
if processor_response is None:
return None
response = common.ExtensionParameter(self._request.name())
accepted_method_desc = _create_accepted_method_desc(
self._compression_method_name,
processor_response.get_parameters())
response.add_parameter(self._METHOD_PARAM, accepted_method_desc)
self._logger.debug(
'Enable %s extension (method: %s)' %
(self._request.name(), self._compression_method_name))
return response
def setup_stream_options(self, stream_options):
if self._compression_processor is None:
return
self._compression_processor.setup_stream_options(stream_options)
def get_compression_processor(self):
return self._compression_processor
class PerFrameCompressionExtensionProcessor(CompressionExtensionProcessorBase):
"""WebSocket Per-frame compression extension processor."""
_DEFLATE_METHOD = 'deflate'
def __init__(self, request):
CompressionExtensionProcessorBase.__init__(self, request)
def name(self):
return common.PERFRAME_COMPRESSION_EXTENSION
def _lookup_compression_processor(self, method_desc):
if method_desc.name() == self._DEFLATE_METHOD:
return DeflateFrameExtensionProcessor(method_desc)
_available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = (
PerFrameCompressionExtensionProcessor)
class DeflateMessageProcessor(ExtensionProcessorInterface):
"""Per-message deflate processor."""
_S2C_MAX_WINDOW_BITS_PARAM = 's2c_max_window_bits'
_S2C_NO_CONTEXT_TAKEOVER_PARAM = 's2c_no_context_takeover'
_C2S_MAX_WINDOW_BITS_PARAM = 'c2s_max_window_bits'
_C2S_NO_CONTEXT_TAKEOVER_PARAM = 'c2s_no_context_takeover'
def __init__(self, request):
self._request = request
self._logger = util.get_class_logger(self)
self._c2s_max_window_bits = None
self._c2s_no_context_takeover = False
self._compress_outgoing = False
# Counters for statistics.
# Total number of outgoing bytes supplied to this filter.
self._total_outgoing_payload_bytes = 0
# Total number of bytes sent to the network after applying this filter.
self._total_filtered_outgoing_payload_bytes = 0
# Total number of bytes received from the network.
self._total_incoming_payload_bytes = 0
# Total number of incoming bytes obtained after applying this filter.
self._total_filtered_incoming_payload_bytes = 0
def name(self):
return 'deflate'
def get_extension_response(self):
# Any unknown parameter will be just ignored.
s2c_max_window_bits = self._request.get_parameter_value(
self._S2C_MAX_WINDOW_BITS_PARAM)
if s2c_max_window_bits is not None:
try:
s2c_max_window_bits = int(s2c_max_window_bits)
except ValueError, e:
return None
if s2c_max_window_bits < 8 or s2c_max_window_bits > 15:
return None
s2c_no_context_takeover = self._request.has_parameter(
self._S2C_NO_CONTEXT_TAKEOVER_PARAM)
if (s2c_no_context_takeover and
self._request.get_parameter_value(
self._S2C_NO_CONTEXT_TAKEOVER_PARAM) is not None):
return None
self._deflater = util._RFC1979Deflater(
s2c_max_window_bits, s2c_no_context_takeover)
self._inflater = util._RFC1979Inflater()
self._compress_outgoing = True
response = common.ExtensionParameter(self._request.name())
if s2c_max_window_bits is not None:
response.add_parameter(
self._S2C_MAX_WINDOW_BITS_PARAM, str(s2c_max_window_bits))
        if s2c_no_context_takeover:
response.add_parameter(
self._S2C_NO_CONTEXT_TAKEOVER_PARAM, None)
if self._c2s_max_window_bits is not None:
response.add_parameter(
self._C2S_MAX_WINDOW_BITS_PARAM,
                str(self._c2s_max_window_bits))
if self._c2s_no_context_takeover:
response.add_parameter(
self._C2S_NO_CONTEXT_TAKEOVER_PARAM, None)
self._logger.debug(
'Enable %s extension ('
'request: s2c_max_window_bits=%s; s2c_no_context_takeover=%r, '
'response: c2s_max_window_bits=%s; c2s_no_context_takeover=%r)' %
(self._request.name(),
s2c_max_window_bits,
s2c_no_context_takeover,
self._c2s_max_window_bits,
self._c2s_no_context_takeover))
return response
def setup_stream_options(self, stream_options):
class _OutgoingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, message, end=True, binary=False):
return self._parent._process_outgoing_message(
message, end, binary)
class _IncomingMessageFilter(object):
def __init__(self, parent):
self._parent = parent
self._decompress_next_message = False
def decompress_next_message(self):
self._decompress_next_message = True
def filter(self, message):
message = self._parent._process_incoming_message(
message, self._decompress_next_message)
self._decompress_next_message = False
return message
self._outgoing_message_filter = _OutgoingMessageFilter(self)
self._incoming_message_filter = _IncomingMessageFilter(self)
stream_options.outgoing_message_filters.append(
self._outgoing_message_filter)
stream_options.incoming_message_filters.append(
self._incoming_message_filter)
class _OutgoingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
self._set_compression_bit = False
def set_compression_bit(self):
self._set_compression_bit = True
def filter(self, frame):
self._parent._process_outgoing_frame(
frame, self._set_compression_bit)
self._set_compression_bit = False
class _IncomingFrameFilter(object):
def __init__(self, parent):
self._parent = parent
def filter(self, frame):
self._parent._process_incoming_frame(frame)
self._outgoing_frame_filter = _OutgoingFrameFilter(self)
self._incoming_frame_filter = _IncomingFrameFilter(self)
stream_options.outgoing_frame_filters.append(
self._outgoing_frame_filter)
stream_options.incoming_frame_filters.append(
self._incoming_frame_filter)
stream_options.encode_text_message_to_utf8 = False
def set_c2s_window_bits(self, value):
self._c2s_max_window_bits = value
def set_c2s_no_context_takeover(self, value):
self._c2s_no_context_takeover = value
def enable_outgoing_compression(self):
self._compress_outgoing = True
def disable_outgoing_compression(self):
self._compress_outgoing = False
def _process_incoming_message(self, message, decompress):
if not decompress:
return message
received_payload_size = len(message)
self._total_incoming_payload_bytes += received_payload_size
message = self._inflater.filter(message)
filtered_payload_size = len(message)
self._total_filtered_incoming_payload_bytes += filtered_payload_size
_log_decompression_ratio(self._logger, received_payload_size,
self._total_incoming_payload_bytes,
filtered_payload_size,
self._total_filtered_incoming_payload_bytes)
return message
def _process_outgoing_message(self, message, end, binary):
if not binary:
message = message.encode('utf-8')
if not self._compress_outgoing:
return message
original_payload_size = len(message)
self._total_outgoing_payload_bytes += original_payload_size
message = self._deflater.filter(message)
filtered_payload_size = len(message)
self._total_filtered_outgoing_payload_bytes += filtered_payload_size
_log_compression_ratio(self._logger, original_payload_size,
self._total_outgoing_payload_bytes,
filtered_payload_size,
self._total_filtered_outgoing_payload_bytes)
self._outgoing_frame_filter.set_compression_bit()
return message
def _process_incoming_frame(self, frame):
if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
self._incoming_message_filter.decompress_next_message()
frame.rsv1 = 0
def _process_outgoing_frame(self, frame, compression_bit):
if (not compression_bit or
common.is_control_opcode(frame.opcode)):
return
frame.rsv1 = 1
class PerMessageCompressionExtensionProcessor(
CompressionExtensionProcessorBase):
"""WebSocket Per-message compression extension processor."""
_DEFLATE_METHOD = 'deflate'
def __init__(self, request):
CompressionExtensionProcessorBase.__init__(self, request)
def name(self):
return common.PERMESSAGE_COMPRESSION_EXTENSION
def _lookup_compression_processor(self, method_desc):
if method_desc.name() == self._DEFLATE_METHOD:
return DeflateMessageProcessor(method_desc)
_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
    PerMessageCompressionExtensionProcessor)
class MuxExtensionProcessor(ExtensionProcessorInterface):
"""WebSocket multiplexing extension processor."""
_QUOTA_PARAM = 'quota'
def __init__(self, request):
self._request = request
def name(self):
return common.MUX_EXTENSION
def get_extension_response(self, ws_request,
logical_channel_extensions):
# Mux extension cannot be used after extensions that depend on
# frame boundary, extension data field, or any reserved bits
# which are attributed to each frame.
for extension in logical_channel_extensions:
name = extension.name()
if (name == common.PERFRAME_COMPRESSION_EXTENSION or
name == common.DEFLATE_FRAME_EXTENSION or
name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
return None
quota = self._request.get_parameter_value(self._QUOTA_PARAM)
if quota is None:
ws_request.mux_quota = 0
else:
try:
quota = int(quota)
except ValueError, e:
return None
if quota < 0 or quota >= 2 ** 32:
return None
ws_request.mux_quota = quota
ws_request.mux = True
ws_request.mux_extensions = logical_channel_extensions
return common.ExtensionParameter(common.MUX_EXTENSION)
def setup_stream_options(self, stream_options):
pass
_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
def get_extension_processor(extension_request):
global _available_processors
processor_class = _available_processors.get(extension_request.name())
if processor_class is None:
return None
return processor_class(extension_request)
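# A minimal sketch of how this registry is typically driven during the opening
# handshake (the surrounding names here are assumptions, not this module's
# API): each extension offered by the client is looked up, and a processor
# that accepts it contributes one response parameter.
#
#   for ext_request in common.parse_extensions(header_value,
#                                              allow_quoted_string=True):
#       processor = get_extension_processor(ext_request)
#       if processor is None:
#           continue  # unknown extension: silently ignored
#       response = processor.get_extension_response()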
# vi:sts=4 sw=4 et
|
bsd-3-clause
|
rapyuta/autobahn_rce
|
examples/websocket/echo_site_tls/server.py
|
26
|
2260
|
###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor, ssl
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.resource import WebSocketResource, HTTPChannelHixie76Aware
class EchoServerProtocol(WebSocketServerProtocol):
def onMessage(self, msg, binary):
self.sendMessage(msg, binary)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key',
'keys/server.crt')
factory = WebSocketServerFactory("wss://localhost:8080",
debug = debug,
debugCodePaths = debug)
factory.protocol = EchoServerProtocol
factory.setProtocolOptions(allowHixie76 = True) # needed if Hixie76 is to be supported
resource = WebSocketResource(factory)
   ## we serve static files under "/" ..
root = File(".")
## and our WebSocket server under "/ws"
root.putChild("ws", resource)
## both under one Twisted Web Site
site = Site(root)
site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is to be supported
reactor.listenSSL(8080, site, contextFactory)
reactor.run()
|
apache-2.0
|
Lujeni/ansible
|
test/units/modules/network/fortios/test_fortios_router_ospf.py
|
21
|
12710
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_router_ospf
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_router_ospf.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
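# The module under test converts the underscored Ansible option names in
# input_data into the hyphenated FortiOS API keys asserted in expected_data,
# and drops any attribute the schema does not know about (see the
# filter-foreign-attributes case at the end).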
def test_router_ospf_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_ospf': {
'abr_type': 'cisco',
'auto_cost_ref_bandwidth': '4',
'bfd': 'enable',
'database_overflow': 'enable',
'database_overflow_max_lsas': '7',
'database_overflow_time_to_recover': '8',
'default_information_metric': '9',
'default_information_metric_type': '1',
'default_information_originate': 'enable',
'default_information_route_map': 'test_value_12',
'default_metric': '13',
'distance': '14',
'distance_external': '15',
'distance_inter_area': '16',
'distance_intra_area': '17',
'distribute_list_in': 'test_value_18',
'distribute_route_map_in': 'test_value_19',
'log_neighbour_changes': 'enable',
'restart_mode': 'none',
'restart_period': '22',
'rfc1583_compatible': 'enable',
'router_id': 'test_value_24',
'spf_timers': 'test_value_25',
},
'vdom': 'root'}
is_error, changed, response = fortios_router_ospf.fortios_router(input_data, fos_instance)
expected_data = {
'abr-type': 'cisco',
'auto-cost-ref-bandwidth': '4',
'bfd': 'enable',
'database-overflow': 'enable',
'database-overflow-max-lsas': '7',
'database-overflow-time-to-recover': '8',
'default-information-metric': '9',
'default-information-metric-type': '1',
'default-information-originate': 'enable',
'default-information-route-map': 'test_value_12',
'default-metric': '13',
'distance': '14',
'distance-external': '15',
'distance-inter-area': '16',
'distance-intra-area': '17',
'distribute-list-in': 'test_value_18',
'distribute-route-map-in': 'test_value_19',
'log-neighbour-changes': 'enable',
'restart-mode': 'none',
'restart-period': '22',
'rfc1583-compatible': 'enable',
'router-id': 'test_value_24',
'spf-timers': 'test_value_25',
}
set_method_mock.assert_called_with('router', 'ospf', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_router_ospf_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_ospf': {
'abr_type': 'cisco',
'auto_cost_ref_bandwidth': '4',
'bfd': 'enable',
'database_overflow': 'enable',
'database_overflow_max_lsas': '7',
'database_overflow_time_to_recover': '8',
'default_information_metric': '9',
'default_information_metric_type': '1',
'default_information_originate': 'enable',
'default_information_route_map': 'test_value_12',
'default_metric': '13',
'distance': '14',
'distance_external': '15',
'distance_inter_area': '16',
'distance_intra_area': '17',
'distribute_list_in': 'test_value_18',
'distribute_route_map_in': 'test_value_19',
'log_neighbour_changes': 'enable',
'restart_mode': 'none',
'restart_period': '22',
'rfc1583_compatible': 'enable',
'router_id': 'test_value_24',
'spf_timers': 'test_value_25',
},
'vdom': 'root'}
is_error, changed, response = fortios_router_ospf.fortios_router(input_data, fos_instance)
expected_data = {
'abr-type': 'cisco',
'auto-cost-ref-bandwidth': '4',
'bfd': 'enable',
'database-overflow': 'enable',
'database-overflow-max-lsas': '7',
'database-overflow-time-to-recover': '8',
'default-information-metric': '9',
'default-information-metric-type': '1',
'default-information-originate': 'enable',
'default-information-route-map': 'test_value_12',
'default-metric': '13',
'distance': '14',
'distance-external': '15',
'distance-inter-area': '16',
'distance-intra-area': '17',
'distribute-list-in': 'test_value_18',
'distribute-route-map-in': 'test_value_19',
'log-neighbour-changes': 'enable',
'restart-mode': 'none',
'restart-period': '22',
'rfc1583-compatible': 'enable',
'router-id': 'test_value_24',
'spf-timers': 'test_value_25',
}
set_method_mock.assert_called_with('router', 'ospf', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_router_ospf_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_ospf': {
'abr_type': 'cisco',
'auto_cost_ref_bandwidth': '4',
'bfd': 'enable',
'database_overflow': 'enable',
'database_overflow_max_lsas': '7',
'database_overflow_time_to_recover': '8',
'default_information_metric': '9',
'default_information_metric_type': '1',
'default_information_originate': 'enable',
'default_information_route_map': 'test_value_12',
'default_metric': '13',
'distance': '14',
'distance_external': '15',
'distance_inter_area': '16',
'distance_intra_area': '17',
'distribute_list_in': 'test_value_18',
'distribute_route_map_in': 'test_value_19',
'log_neighbour_changes': 'enable',
'restart_mode': 'none',
'restart_period': '22',
'rfc1583_compatible': 'enable',
'router_id': 'test_value_24',
'spf_timers': 'test_value_25',
},
'vdom': 'root'}
is_error, changed, response = fortios_router_ospf.fortios_router(input_data, fos_instance)
expected_data = {
'abr-type': 'cisco',
'auto-cost-ref-bandwidth': '4',
'bfd': 'enable',
'database-overflow': 'enable',
'database-overflow-max-lsas': '7',
'database-overflow-time-to-recover': '8',
'default-information-metric': '9',
'default-information-metric-type': '1',
'default-information-originate': 'enable',
'default-information-route-map': 'test_value_12',
'default-metric': '13',
'distance': '14',
'distance-external': '15',
'distance-inter-area': '16',
'distance-intra-area': '17',
'distribute-list-in': 'test_value_18',
'distribute-route-map-in': 'test_value_19',
'log-neighbour-changes': 'enable',
'restart-mode': 'none',
'restart-period': '22',
'rfc1583-compatible': 'enable',
'router-id': 'test_value_24',
'spf-timers': 'test_value_25',
}
set_method_mock.assert_called_with('router', 'ospf', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_router_ospf_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'router_ospf': {
'random_attribute_not_valid': 'tag',
'abr_type': 'cisco',
'auto_cost_ref_bandwidth': '4',
'bfd': 'enable',
'database_overflow': 'enable',
'database_overflow_max_lsas': '7',
'database_overflow_time_to_recover': '8',
'default_information_metric': '9',
'default_information_metric_type': '1',
'default_information_originate': 'enable',
'default_information_route_map': 'test_value_12',
'default_metric': '13',
'distance': '14',
'distance_external': '15',
'distance_inter_area': '16',
'distance_intra_area': '17',
'distribute_list_in': 'test_value_18',
'distribute_route_map_in': 'test_value_19',
'log_neighbour_changes': 'enable',
'restart_mode': 'none',
'restart_period': '22',
'rfc1583_compatible': 'enable',
'router_id': 'test_value_24',
'spf_timers': 'test_value_25',
},
'vdom': 'root'}
is_error, changed, response = fortios_router_ospf.fortios_router(input_data, fos_instance)
expected_data = {
'abr-type': 'cisco',
'auto-cost-ref-bandwidth': '4',
'bfd': 'enable',
'database-overflow': 'enable',
'database-overflow-max-lsas': '7',
'database-overflow-time-to-recover': '8',
'default-information-metric': '9',
'default-information-metric-type': '1',
'default-information-originate': 'enable',
'default-information-route-map': 'test_value_12',
'default-metric': '13',
'distance': '14',
'distance-external': '15',
'distance-inter-area': '16',
'distance-intra-area': '17',
'distribute-list-in': 'test_value_18',
'distribute-route-map-in': 'test_value_19',
'log-neighbour-changes': 'enable',
'restart-mode': 'none',
'restart-period': '22',
'rfc1583-compatible': 'enable',
'router-id': 'test_value_24',
'spf-timers': 'test_value_25',
}
set_method_mock.assert_called_with('router', 'ospf', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
gpl-3.0
|
Bismarrck/tensorflow
|
tensorflow/python/util/function_utils_test.py
|
21
|
9293
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator related util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import test
from tensorflow.python.util import function_utils
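# fn_args returns the tuple of argument names a callable accepts, excluding
# arguments already bound by functools.partial; has_kwargs reports whether the
# callable (after unwrapping partials) accepts **kwargs. The cases below pin
# both behaviors down for plain functions, callables, bound methods and
# nested partials.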
def silly_example_function():
pass
class SillyCallableClass(object):
def __call__(self):
pass
class FnArgsTest(test.TestCase):
def test_simple_function(self):
def fn(a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(fn))
def test_callable(self):
class Foo(object):
def __call__(self, a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(Foo()))
def test_bounded_method(self):
class Foo(object):
def bar(self, a, b):
return a + b
self.assertEqual(('a', 'b'), function_utils.fn_args(Foo().bar))
def test_partial_function(self):
expected_test_arg = 123
def fn(a, test_arg):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg=123)
self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))
def test_partial_function_with_positional_args(self):
expected_test_arg = 123
def fn(test_arg, a):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, 123)
self.assertEqual(('a',), function_utils.fn_args(wrapped_fn))
self.assertEqual(3, wrapped_fn(3))
self.assertEqual(3, wrapped_fn(a=3))
def test_double_partial(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(a, test_arg1, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
def test_double_partial_with_positional_args_in_outer_layer(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(test_arg1, a, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, 123)
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
self.assertEqual(3, double_wrapped_fn(3))
self.assertEqual(3, double_wrapped_fn(a=3))
def test_double_partial_with_positional_args_in_both_layers(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn(test_arg1, test_arg2, a):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial fn does not work correctly')
return a
wrapped_fn = functools.partial(fn, 123) # binds to test_arg1
double_wrapped_fn = functools.partial(wrapped_fn, 456) # binds to test_arg2
self.assertEqual(('a',), function_utils.fn_args(double_wrapped_fn))
self.assertEqual(3, double_wrapped_fn(3))
self.assertEqual(3, double_wrapped_fn(a=3))
class HasKwargsTest(test.TestCase):
def test_simple_function(self):
fn_has_kwargs = lambda **x: x
self.assertTrue(function_utils.has_kwargs(fn_has_kwargs))
fn_has_no_kwargs = lambda x: x
self.assertFalse(function_utils.has_kwargs(fn_has_no_kwargs))
def test_callable(self):
class FooHasKwargs(object):
def __call__(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs()))
class FooHasNoKwargs(object):
def __call__(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs()))
def test_bounded_method(self):
class FooHasKwargs(object):
def fn(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs().fn))
class FooHasNoKwargs(object):
def fn(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs().fn))
def test_partial_function(self):
expected_test_arg = 123
def fn_has_kwargs(test_arg, **x):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg=123)
self.assertTrue(function_utils.has_kwargs(wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg=123)
self.assertFalse(function_utils.has_kwargs(wrapped_fn))
some_arg = 1
self.assertEqual(wrapped_fn(some_arg), some_arg)
def test_double_partial(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn_has_kwargs(test_arg1, test_arg2, **x):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertTrue(function_utils.has_kwargs(double_wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(double_wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg1, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertFalse(function_utils.has_kwargs(double_wrapped_fn))
some_arg = 1
self.assertEqual(double_wrapped_fn(some_arg), some_arg)
def test_raises_type_error(self):
with self.assertRaisesRegexp(
TypeError, 'fn should be a function-like object'):
function_utils.has_kwargs('not a function')
class GetFuncNameTest(test.TestCase):
def testWithSimpleFunction(self):
self.assertEqual(
'silly_example_function',
function_utils.get_func_name(silly_example_function))
def testWithClassMethod(self):
self.assertEqual(
'GetFuncNameTest.testWithClassMethod',
function_utils.get_func_name(self.testWithClassMethod))
def testWithCallableClass(self):
callable_instance = SillyCallableClass()
self.assertRegexpMatches(
function_utils.get_func_name(callable_instance),
'<.*SillyCallableClass.*>')
def testWithFunctoolsPartial(self):
partial = functools.partial(silly_example_function)
self.assertRegexpMatches(
function_utils.get_func_name(partial),
'<.*functools.partial.*>')
def testWithLambda(self):
anon_fn = lambda x: x
self.assertEqual('<lambda>', function_utils.get_func_name(anon_fn))
def testRaisesWithNonCallableObject(self):
with self.assertRaises(ValueError):
function_utils.get_func_name(None)
class GetFuncCodeTest(test.TestCase):
def testWithSimpleFunction(self):
code = function_utils.get_func_code(silly_example_function)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithClassMethod(self):
code = function_utils.get_func_code(self.testWithClassMethod)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithCallableClass(self):
callable_instance = SillyCallableClass()
code = function_utils.get_func_code(callable_instance)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithLambda(self):
anon_fn = lambda x: x
code = function_utils.get_func_code(anon_fn)
self.assertIsNotNone(code)
self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
def testWithFunctoolsPartial(self):
partial = functools.partial(silly_example_function)
code = function_utils.get_func_code(partial)
self.assertIsNone(code)
def testRaisesWithNonCallableObject(self):
with self.assertRaises(ValueError):
function_utils.get_func_code(None)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
InverseLina/tornado
|
tornado/platform/twisted.py
|
58
|
21688
|
# Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Note: This module's docs are not currently extracted automatically,
# so changes must be made manually to twisted.rst
# TODO: refactor doc build process to use an appropriate virtualenv
"""Bridges between the Twisted reactor and Tornado IOLoop.
This module lets you run applications and libraries written for
Twisted in a Tornado application. It can be used in two modes,
depending on which library's underlying event loop you want to use.
This module has been tested with Twisted versions 11.0.0 and newer.
Twisted on Tornado
------------------
`TornadoReactor` implements the Twisted reactor interface on top of
the Tornado IOLoop. To use it, simply call `install` at the beginning
of the application::
import tornado.platform.twisted
tornado.platform.twisted.install()
from twisted.internet import reactor
When the app is ready to start, call `IOLoop.current().start()`
instead of `reactor.run()`.
It is also possible to create a non-global reactor by calling
`tornado.platform.twisted.TornadoReactor(io_loop)`. However, if
the `IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is
recommended to call::
reactor.fireSystemEvent('shutdown')
reactor.disconnectAll()
before closing the `IOLoop`.
Tornado on Twisted
------------------
`TwistedIOLoop` implements the Tornado IOLoop interface on top of the Twisted
reactor. Recommended usage::
from tornado.platform.twisted import TwistedIOLoop
from twisted.internet import reactor
TwistedIOLoop().install()
# Set up your tornado application as usual using `IOLoop.instance`
reactor.run()
`TwistedIOLoop` always uses the global Twisted reactor.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
import numbers
import socket
import sys
import twisted.internet.abstract
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase
from twisted.internet.interfaces import \
IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor
from twisted.python import failure, log
from twisted.internet import error
import twisted.names.cache
import twisted.names.client
import twisted.names.hosts
import twisted.names.resolve
from zope.interface import implementer
from tornado.concurrent import Future
from tornado.escape import utf8
from tornado import gen
import tornado.ioloop
from tornado.log import app_log
from tornado.netutil import Resolver
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop
from tornado.util import timedelta_to_seconds
@implementer(IDelayedCall)
class TornadoDelayedCall(object):
"""DelayedCall object for Tornado."""
def __init__(self, reactor, seconds, f, *args, **kw):
self._reactor = reactor
self._func = functools.partial(f, *args, **kw)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
self._active = True
def _called(self):
self._active = False
self._reactor._removeDelayedCall(self)
try:
self._func()
except:
app_log.error("_called caught exception", exc_info=True)
def getTime(self):
return self._time
def cancel(self):
self._active = False
self._reactor._io_loop.remove_timeout(self._timeout)
self._reactor._removeDelayedCall(self)
def delay(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time += seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def reset(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def active(self):
return self._active
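# Illustrative note (not part of the original module): reactor.callLater()
# returns one of these objects; until it fires, a caller may postpone it with
# dc.delay(extra_seconds), reschedule it with dc.reset(seconds), or abort it
# with dc.cancel() while dc.active() is still True.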
@implementer(IReactorTime, IReactorFDSet)
class TornadoReactor(PosixReactorBase):
"""Twisted reactor built on the Tornado IOLoop.
Since it is intended to be used in applications where the top-level
event loop is ``io_loop.start()`` rather than ``reactor.run()``,
it is implemented a little differently than other Twisted reactors.
We override `mainLoop` instead of `doIteration` and must implement
timed call functionality on top of `IOLoop.add_timeout` rather than
using the implementation in `PosixReactorBase`.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, io_loop=None):
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple
self._delayedCalls = {}
PosixReactorBase.__init__(self)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
# IOLoop.start() bypasses some of the reactor initialization.
# Fire off the necessary events if they weren't already triggered
# by reactor.run().
def start_if_necessary():
if not self._started:
self.fireSystemEvent('startup')
self._io_loop.add_callback(start_if_necessary)
# IReactorTime
def seconds(self):
return self._io_loop.time()
def callLater(self, seconds, f, *args, **kw):
dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
self._delayedCalls[dc] = True
return dc
def getDelayedCalls(self):
return [x for x in self._delayedCalls if x._active]
def _removeDelayedCall(self, dc):
if dc in self._delayedCalls:
del self._delayedCalls[dc]
# IReactorThreads
def callFromThread(self, f, *args, **kw):
"""See `twisted.internet.interfaces.IReactorThreads.callFromThread`"""
assert callable(f), "%s is not callable" % f
with NullContext():
# This NullContext is mainly for an edge case when running
# TwistedIOLoop on top of a TornadoReactor.
# TwistedIOLoop.add_callback uses reactor.callFromThread and
# should not pick up additional StackContexts along the way.
self._io_loop.add_callback(f, *args, **kw)
# We don't need the waker code from the super class, Tornado uses
# its own waker.
def installWaker(self):
pass
def wakeUp(self):
pass
# IReactorFDSet
def _invoke_callback(self, fd, events):
if fd not in self._fds:
return
(reader, writer) = self._fds[fd]
if reader:
err = None
if reader.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.READ:
err = log.callWithLogger(reader, reader.doRead)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeReader(reader)
reader.readConnectionLost(failure.Failure(err))
if writer:
err = None
if writer.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.WRITE:
err = log.callWithLogger(writer, writer.doWrite)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeWriter(writer)
writer.writeConnectionLost(failure.Failure(err))
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read."""
if reader in self._readers:
# Don't add the reader if it's already there
return
fd = reader.fileno()
self._readers[reader] = fd
if fd in self._fds:
(_, writer) = self._fds[fd]
self._fds[fd] = (reader, writer)
if writer:
# We already registered this fd for write events,
# update it for read events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (reader, None)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.READ)
def addWriter(self, writer):
"""Add a FileDescriptor for notification of data available to write."""
if writer in self._writers:
return
fd = writer.fileno()
self._writers[writer] = fd
if fd in self._fds:
(reader, _) = self._fds[fd]
self._fds[fd] = (reader, writer)
if reader:
# We already registered this fd for read events,
# update it for write events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (None, writer)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.WRITE)
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read."""
if reader in self._readers:
fd = self._readers.pop(reader)
(_, writer) = self._fds[fd]
if writer:
# We have a writer so we need to update the IOLoop for
# write events only.
self._fds[fd] = (None, writer)
self._io_loop.update_handler(fd, IOLoop.WRITE)
else:
# Since we have no writer registered, we remove the
# entry from _fds and unregister the handler from the
# IOLoop
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write."""
if writer in self._writers:
fd = self._writers.pop(writer)
(reader, _) = self._fds[fd]
if reader:
# We have a reader so we need to update the IOLoop for
# read events only.
self._fds[fd] = (reader, None)
self._io_loop.update_handler(fd, IOLoop.READ)
else:
# Since we have no reader registered, we remove the
# entry from the _fds and unregister the handler from
# the IOLoop.
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeAll(self):
return self._removeAll(self._readers, self._writers)
def getReaders(self):
return self._readers.keys()
def getWriters(self):
return self._writers.keys()
# The following functions are mainly used in twisted-style test cases;
# it is expected that most users of the TornadoReactor will call
# IOLoop.start() instead of Reactor.run().
def stop(self):
PosixReactorBase.stop(self)
fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
self._io_loop.add_callback(fire_shutdown)
def crash(self):
PosixReactorBase.crash(self)
self._io_loop.stop()
def doIteration(self, delay):
raise NotImplementedError("doIteration")
def mainLoop(self):
self._io_loop.start()
class _TestReactor(TornadoReactor):
"""Subclass of TornadoReactor for use in unittests.
This can't go in the test.py file because of import-order dependencies
with the Twisted reactor test builder.
"""
def __init__(self):
# always use a new ioloop
super(_TestReactor, self).__init__(IOLoop())
def listenTCP(self, port, factory, backlog=50, interface=''):
# default to localhost to avoid firewall prompts on the mac
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenTCP(
port, factory, backlog=backlog, interface=interface)
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenUDP(
port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None):
"""Install this package as the default Twisted reactor.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
reactor = TornadoReactor(io_loop)
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
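# Usage sketch (an assumption, not part of the original file): after install()
# the global Twisted reactor is a TornadoReactor, so Twisted code runs
# unchanged while Tornado drives the event loop. `EchoFactory` is a
# hypothetical Twisted protocol factory.
#
#   import tornado.platform.twisted
#   tornado.platform.twisted.install()
#   from twisted.internet import reactor
#   reactor.listenTCP(8888, EchoFactory())
#   tornado.ioloop.IOLoop.current().start()  # instead of reactor.run()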
@implementer(IReadDescriptor, IWriteDescriptor)
class _FD(object):
def __init__(self, fd, fileobj, handler):
self.fd = fd
self.fileobj = fileobj
self.handler = handler
self.reading = False
self.writing = False
self.lost = False
def fileno(self):
return self.fd
def doRead(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
def doWrite(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
def connectionLost(self, reason):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
self.lost = True
def logPrefix(self):
return ''
class TwistedIOLoop(tornado.ioloop.IOLoop):
"""IOLoop implementation that runs on Twisted.
Uses the global Twisted reactor by default. To create multiple
`TwistedIOLoops` in the same process, you must pass a unique reactor
when constructing each one.
Not compatible with `tornado.process.Subprocess.set_exit_callback`
because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
with each other.
"""
def initialize(self, reactor=None, **kwargs):
super(TwistedIOLoop, self).initialize(**kwargs)
if reactor is None:
import twisted.internet.reactor
reactor = twisted.internet.reactor
self.reactor = reactor
self.fds = {}
def close(self, all_fds=False):
fds = self.fds
self.reactor.removeAll()
for c in self.reactor.getDelayedCalls():
c.cancel()
if all_fds:
for fd in fds.values():
self.close_fd(fd.fileobj)
def add_handler(self, fd, handler, events):
if fd in self.fds:
raise ValueError('fd %s added twice' % fd)
fd, fileobj = self.split_fd(fd)
self.fds[fd] = _FD(fd, fileobj, wrap(handler))
if events & tornado.ioloop.IOLoop.READ:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & tornado.ioloop.IOLoop.READ:
if not self.fds[fd].reading:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
else:
if self.fds[fd].reading:
self.fds[fd].reading = False
self.reactor.removeReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
if not self.fds[fd].writing:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
else:
if self.fds[fd].writing:
self.fds[fd].writing = False
self.reactor.removeWriter(self.fds[fd])
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.fds:
return
self.fds[fd].lost = True
if self.fds[fd].reading:
self.reactor.removeReader(self.fds[fd])
if self.fds[fd].writing:
self.reactor.removeWriter(self.fds[fd])
del self.fds[fd]
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.reactor.run()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.reactor.crash()
def add_timeout(self, deadline, callback, *args, **kwargs):
# This method could be simplified (since tornado 4.0) by
# overriding call_at instead of add_timeout, but we leave it
# for now as a test of backwards-compatibility.
if isinstance(deadline, numbers.Real):
delay = max(deadline - self.time(), 0)
elif isinstance(deadline, datetime.timedelta):
delay = timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r")
return self.reactor.callLater(
delay, self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
if timeout.active():
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
self.reactor.callFromThread(
self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
self.add_callback(callback, *args, **kwargs)
class TwistedResolver(Resolver):
"""Twisted-based asynchronous resolver.
This is a non-blocking and non-threaded resolver. It is
recommended only when threads cannot be used, since it has
limitations compared to the standard ``getaddrinfo``-based
`~tornado.netutil.Resolver` and
`~tornado.netutil.ThreadedResolver`. Specifically, it returns at
most one result, and arguments other than ``host`` and ``family``
are ignored. It may fail to resolve when ``family`` is not
``socket.AF_UNSPEC``.
Requires Twisted 12.1 or newer.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
# partial copy of twisted.names.client.createResolver, which doesn't
# allow for a reactor to be passed in.
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
reactor=self.reactor)
self.resolver = twisted.names.resolve.ResolverChain(
[host_resolver, cache_resolver, real_resolver])
@gen.coroutine
def resolve(self, host, port, family=0):
# getHostByName doesn't accept IP addresses, so if the input
# looks like an IP address just return it immediately.
if twisted.internet.abstract.isIPAddress(host):
resolved = host
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(host):
resolved = host
resolved_family = socket.AF_INET6
else:
deferred = self.resolver.getHostByName(utf8(host))
resolved = yield gen.Task(deferred.addBoth)
if isinstance(resolved, failure.Failure):
resolved.raiseException()
elif twisted.internet.abstract.isIPAddress(resolved):
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(resolved):
resolved_family = socket.AF_INET6
else:
resolved_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != resolved_family:
raise Exception('Requested socket family %d but got %d' %
(family, resolved_family))
result = [
(resolved_family, (resolved, port)),
]
raise gen.Return(result)
if hasattr(gen.convert_yielded, 'register'):
@gen.convert_yielded.register(Deferred)
def _(d):
f = Future()
def errback(failure):
try:
failure.raiseException()
# Should never happen, but just in case
raise Exception("errback called without error")
except:
f.set_exc_info(sys.exc_info())
d.addCallbacks(f.set_result, errback)
return f
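# Sketch (an assumption): with the convert_yielded registration above, a
# Twisted Deferred can be yielded directly from a Tornado coroutine.
# `make_deferred` is a hypothetical function returning a Deferred.
#
#   @gen.coroutine
#   def wait_for_deferred():
#       result = yield make_deferred()
#       raise gen.Return(result)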
|
apache-2.0
|
peterlada/backend
|
docs/conf.py
|
1
|
8265
|
# -*- coding: utf-8 -*-
#
# backend documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 25 13:03:08 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinxcontrib.autohttp.flask'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'backend'
copyright = u'2013, Matt Wright'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Overholtdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'backend.tex', u'backend Documentation',
u'Matt Wright', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'backend', u'backend Documentation',
[u'Matt Wright'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'backend', u'backend Documentation',
u'Matt Wright', 'backend', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
mit
|
paterson/servo
|
tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/_stream_base.py
|
652
|
5978
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Base stream class.
"""
# Note: request.connection.write/read are used in this module, even though
# mod_python document says that they should be used only in connection
# handlers. Unfortunately, we have no other options. For example,
# request.write/read are not suitable because they don't allow direct raw bytes
# writing/reading.
import socket
from mod_pywebsocket import util
# Exceptions
class ConnectionTerminatedException(Exception):
"""This exception will be raised when a connection is terminated
unexpectedly.
"""
pass
class InvalidFrameException(ConnectionTerminatedException):
"""This exception will be raised when we received an invalid frame we
cannot parse.
"""
pass
class BadOperationException(Exception):
"""This exception will be raised when send_message() is called on
server-terminated connection or receive_message() is called on
client-terminated connection.
"""
pass
class UnsupportedFrameException(Exception):
"""This exception will be raised when we receive a frame with flag, opcode
we cannot handle. Handlers can just catch and ignore this exception and
call receive_message() again to continue processing the next frame.
"""
pass
class InvalidUTF8Exception(Exception):
"""This exception will be raised when we receive a text frame which
contains invalid UTF-8 strings.
"""
pass
class StreamBase(object):
"""Base stream class."""
def __init__(self, request):
"""Construct an instance.
Args:
request: mod_python request.
"""
self._logger = util.get_class_logger(self)
self._request = request
def _read(self, length):
"""Reads length bytes from connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
try:
read_bytes = self._request.connection.read(length)
if not read_bytes:
raise ConnectionTerminatedException(
'Receiving %d bytes failed. Peer (%r) closed connection' %
(length, (self._request.connection.remote_addr,)))
return read_bytes
except socket.error, e:
# Catch a socket.error. Because it's not a child class of the
# IOError prior to Python 2.6, we cannot omit this except clause.
# Use %s rather than %r for the exception to use human friendly
# format.
raise ConnectionTerminatedException(
'Receiving %d bytes failed. socket.error (%s) occurred' %
(length, e))
except IOError, e:
# Also catch an IOError because mod_python throws it.
raise ConnectionTerminatedException(
'Receiving %d bytes failed. IOError (%s) occurred' %
(length, e))
def _write(self, bytes_to_write):
"""Writes given bytes to connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
"""
try:
self._request.connection.write(bytes_to_write)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._request.connection.remote_addr,),
e)
raise
def receive_bytes(self, length):
"""Receives multiple bytes. Retries read when we couldn't receive the
specified amount.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while length > 0:
new_read_bytes = self._read(length)
read_bytes.append(new_read_bytes)
length -= len(new_read_bytes)
return ''.join(read_bytes)
def _read_until(self, delim_char):
"""Reads bytes until we encounter delim_char. The result will not
contain delim_char.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
read_bytes = []
while True:
ch = self._read(1)
if ch == delim_char:
break
read_bytes.append(ch)
return ''.join(read_bytes)
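# Sketch (an assumption, not part of pywebsocket): a frame parser built on
# this class would combine the primitives roughly like so:
#
#   header = stream.receive_bytes(2)      # loops until exactly 2 bytes arrive
#   payload = stream._read_until('\xff')  # bytes up to, excluding, the 0xff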
# vi:sts=4 sw=4 et
|
mpl-2.0
|
TheNaterz/koadic
|
core/loader.py
|
1
|
1886
|
import os
import sys
import inspect
import core.plugin
def load_plugins(dir, instantiate = False, shell = None):
plugins = {}
for root, dirs, files in os.walk(dir):
sys.path.append(root)
# make forward slashes on windows
module_root = root.replace(dir, "").replace("\\", "/")
#if (module_root.startswith("/")):
#module_root = module_root[1:]
#print root
for file in files:
if not file.endswith(".py"):
continue
if file in ["__init__.py"]:
continue
file = file.rsplit(".py", 1)[0]
pname = module_root + "/" + file
if (pname.startswith("/")):
pname = pname[1:]
if instantiate:
if pname in sys.modules:
del sys.modules[pname]
env = __import__(file)
for name, obj in inspect.getmembers(env):
if inspect.isclass(obj) and issubclass(obj, core.plugin.Plugin):
plugins[pname] = obj(shell)
break
else:
plugins[pname] = __import__(file)
sys.path.remove(root)
return plugins
def load_script(path, options = None, minimize = True):
with open(path, "rb") as f:
script = f.read().strip()
#script = self.linter.prepend_stdlib(script)
#if minimize:
#script = self.linter.minimize_script(script)
script = apply_options(script, options)
return script
def apply_options(script, options = None):
if options is not None:
for option in options.options:
name = "~%s~" % option.name
val = str(option.value).encode()
script = script.replace(name.encode(), val)
script = script.replace(name.lower().encode(), val)
return script
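# Worked example (hypothetical option objects, not from the original file):
# a ~NAME~ placeholder in a script is replaced by the option's value, in both
# upper- and lower-case spellings.
#
#   class _Opt(object):
#       name = "LHOST"
#       value = "10.0.0.1"
#   class _Opts(object):
#       options = [_Opt()]
#   apply_options(b"connect('~lhost~');", _Opts())  # -> b"connect('10.0.0.1');"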
|
apache-2.0
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/django/views/csrf.py
|
70
|
5732
|
from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.translation import ugettext as _
from django.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline template as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function passes a <code>request</code> to the template's <a
href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a>
method.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
<li>The form has a valid CSRF token. After logging in in another browser
tab or hitting the back button after a login, you may need to reload the
page with the form, because the token is rotated after a login.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
CSRF_FAILURE_TEMPLATE_NAME = "403_csrf.html"
def csrf_failure(request, reason="", template_name=CSRF_FAILURE_TEMPLATE_NAME):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
c = Context({
'title': _("Forbidden"),
'main': _("CSRF verification failed. Request aborted."),
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_referer1': _(
"You are seeing this message because this HTTPS site requires a "
"'Referer header' to be sent by your Web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."),
'no_referer2': _(
"If you have configured your browser to disable 'Referer' headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for 'same-origin' requests."),
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'no_cookie1': _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."),
'no_cookie2': _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for 'same-origin' "
"requests."),
'DEBUG': settings.DEBUG,
'docs_version': get_docs_version(),
'more': _("More information is available with DEBUG=True."),
})
try:
t = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name == CSRF_FAILURE_TEMPLATE_NAME:
# If the default template doesn't exist, use the string template.
t = Engine().from_string(CSRF_FAILURE_TEMPLATE)
else:
# Raise if a developer-specified template doesn't exist.
raise
return HttpResponseForbidden(t.render(c), content_type='text/html')
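# Configuration sketch (an assumption): the template above mentions the
# CSRF_FAILURE_VIEW setting; a project would point it at a custom view in
# settings.py:
#
#   CSRF_FAILURE_VIEW = 'myproject.views.csrf_failure'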
|
mit
|
wahuneke/django-stripe-payments
|
payments/tests/test_email.py
|
2
|
1329
|
# pylint: disable=C0301
import decimal
from django.core import mail
from django.test import TestCase
from mock import patch
from ..models import Customer
from ..utils import get_user_model
class EmailReceiptTest(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create_user(username="patrick")
self.customer = Customer.objects.create(
user=self.user,
stripe_id="cus_xxxxxxxxxxxxxxx",
card_fingerprint="YYYYYYYY",
card_last_4="2342",
card_kind="Visa"
)
@patch("stripe.Charge.retrieve")
@patch("stripe.Charge.create")
def test_email_receipt_renders_amount_properly(self, ChargeMock, RetrieveMock):
ChargeMock.return_value.id = "ch_XXXXX"
RetrieveMock.return_value = {
"id": "ch_XXXXXX",
"card": {
"last4": "4323",
"type": "Visa"
},
"amount": 40000,
"paid": True,
"refunded": False,
"fee": 499,
"dispute": None,
"created": 1363911708,
"customer": "cus_xxxxxxxxxxxxxxx"
}
self.customer.charge(
amount=decimal.Decimal("400.00")
)
self.assertTrue("$400.00" in mail.outbox[0].body)
|
bsd-3-clause
|
nzlosh/st2
|
st2auth/st2auth/backends/__init__.py
|
3
|
1958
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import json
import six
from oslo_config import cfg
from st2common import log as logging
from st2common.util import driver_loader
__all__ = ["get_available_backends", "get_backend_instance"]
LOG = logging.getLogger(__name__)
BACKENDS_NAMESPACE = "st2auth.backends.backend"
def get_available_backends():
return driver_loader.get_available_backends(namespace=BACKENDS_NAMESPACE)
def get_backend_instance(name):
backend_kwargs = cfg.CONF.auth.backend_kwargs
if backend_kwargs:
try:
kwargs = json.loads(backend_kwargs)
except ValueError as e:
raise ValueError(
'Failed to JSON parse backend settings for backend "%s": %s'
% (name, six.text_type(e))
)
else:
kwargs = {}
cls = driver_loader.get_backend_driver(namespace=BACKENDS_NAMESPACE, name=name)
try:
cls_instance = cls(**kwargs)
except Exception as e:
tb_msg = traceback.format_exc()
class_name = cls.__name__
msg = (
'Failed to instantiate auth backend "%s" (class %s) with backend settings '
'"%s": %s' % (name, class_name, str(kwargs), six.text_type(e))
)
msg += "\n\n" + tb_msg
exc_cls = type(e)
raise exc_cls(msg)
return cls_instance
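# Usage sketch (an assumption): resolving the backend named in the auth
# config; the authenticate() signature shown is illustrative only.
#
#   backend = get_backend_instance(cfg.CONF.auth.backend)
#   backend.authenticate('st2admin', 'secret')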
|
apache-2.0
|
ecino/compassion-modules
|
child_compassion/wizards/project_description.py
|
3
|
6938
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import os
import logging
from odoo import api, models, fields, _
try:
from pyquery import PyQuery
except ImportError:
logger = logging.getLogger(__name__)
logger.error("Please install python pyquery")
NOMINATIVE = 0
ACCUSATIVE = 1
DATIVE = 2
SINGULAR = 0
PLURAL = 1
DIR = os.path.join(os.path.dirname(__file__)) + '/../static/src/html/'
__template_file = open(DIR + 'project_description_template.html')
HTML_TEMPLATE = __template_file.read()
__template_file.close()
class ProjectDescription(models.TransientModel):
_name = 'compassion.project.description'
_description = 'Project Description Generator'
project_id = fields.Many2one(
'compassion.project', required=True, ondelete='cascade')
@api.model
def create(self, vals):
""" This will automatically generate all descriptions and save them
in the related project.
"""
generator = super(ProjectDescription, self).create(vals)
for lang, field in self._supported_languages().iteritems():
desc = generator.with_context(lang=lang)._generate_translation()
generator.project_id.write({field: desc})
return generator
@api.model
def _supported_languages(self):
"""
Inherit to add more languages to have translations of
descriptions.
{lang: description_field}
"""
return {'en_US': 'description_en'}
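# Sketch (an assumption): a localization module could inherit this model and
# extend the mapping; `description_fr` is a hypothetical field name.
#
#   @api.model
#   def _supported_languages(self):
#       res = super(ProjectDescription, self)._supported_languages()
#       res['fr_CH'] = 'description_fr'
#       return res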
def _generate_translation(self):
""" Generate project description. """
desc = PyQuery(HTML_TEMPLATE)
# 1. Basic Information
######################
project = self.project_id
# Put country if not the same as Field Office
if project.country_id and project.country_id != \
project.field_office_id.country_id:
desc('.project_country')[0].text = _(
"The project is located in %s, close to the border."
) % project.country_id.name
else:
desc('#project_country').remove()
desc('.project_name')[0].text = _("Project name")
desc('.project_name')[1].text = project.name
desc('.project_closest_city')[0].text = _("Closest city")
self._show_field(
desc('.project_closest_city')[1], desc('#project_closest_city'),
project.closest_city
)
desc('.project_cdsp_number')[0].text = _("Number of children")
self._show_field(
desc('.project_cdsp_number')[1], desc('#project_cdsp_number'),
project.nb_cdsp_kids
)
if project.electrical_power == 'Not Available':
desc('.project_electricity').html(
_("The project has no electricity."))
else:
desc('#project_electricity').remove()
# 2. Community
##############
desc('#community_label').html(_("Local community"))
desc('.community_population')[0].text = _("Population")
self._show_field(
desc('.community_population')[1], desc('#community_population'),
'{:,}'.format(project.community_population).replace(',', "'")
)
desc('.community_language')[0].text = _("Language")
self._show_field(
desc('.community_language')[1], desc('#community_language'),
project.primary_language_id.name
)
if project.primary_adults_occupation_ids:
desc('.community_job')[0].text = _("Typical job")
self._show_field(
desc('.community_job')[1], desc('#community_job'),
project.primary_adults_occupation_ids[0].value
)
else:
desc('#community_job').remove()
desc('.community_food')[0].text = _("Typical food")
if project.primary_diet_ids:
desc('.community_food')[1].text = project.primary_diet_ids[0].value
else:
desc('#community_food').remove()
desc('.community_school_begins')[0].text = _("School begins in")
self._show_field(
desc('.community_school_begins')[1],
desc('#community_school_begins'),
project.translate('school_year_begins')
)
# 3. Activities
###############
spiritual = project.get_activities('spiritual_activity', 3)
physical = project.get_activities('physical_activity', 3)
cognitive = project.get_activities('cognitive_activity', 3)
socio = project.get_activities('socio_activity', 3)
if spiritual or physical or cognitive or socio:
desc('#activities_label').html(
_("Project activities for children"))
else:
desc('#activities').remove()
if spiritual:
desc('.spiritual_activities').html(_("Spiritual activities"))
desc('#spiritual_activities_list').html(''.join(
['<li>' + activity + '</li>' for activity in spiritual]))
else:
desc('#spiritual_activities').remove()
if physical:
desc('.physical_activities').html(_("Physical activities"))
desc('#physical_activities_list').html(''.join(
['<li>' + activity + '</li>' for activity in physical]))
else:
desc('#physical_activities').remove()
if cognitive:
desc('.cognitive_activities').html(_("Cognitive activities"))
desc('#cognitive_activities_list').html(''.join(
['<li>' + activity + '</li>' for activity in cognitive]))
else:
desc('#cognitive_activities').remove()
if socio:
desc('.socio_activities').html(_("Socio-emotional activities"))
desc('#socio_activities_list').html(''.join(
['<li>' + activity + '</li>' for activity in socio]))
else:
desc('#socio_activities').remove()
if project.activities_for_parents:
desc('.parent_activities').html(
_("In addition, the project offers special activities for the "
"parents such as education courses."))
else:
desc('#parent_activities').remove()
return desc.html()
def _show_field(self, field, container, value):
""" Used to display a field in the description, or hide it
if the value is not set.
"""
if value:
if not isinstance(value, basestring):
value = str(value)
field.text = value
else:
container.remove()
|
agpl-3.0
|
akintoey/django
|
tests/utils_tests/test_autoreload.py
|
191
|
7073
|
import os
import shutil
import tempfile
from importlib import import_module
from django import conf
from django.contrib import admin
from django.test import SimpleTestCase, override_settings
from django.test.utils import extend_sys_path
from django.utils import autoreload
from django.utils._os import npath
LOCALE_PATH = os.path.join(os.path.dirname(__file__), 'locale')
class TestFilenameGenerator(SimpleTestCase):
def clear_autoreload_caches(self):
autoreload._cached_modules = set()
autoreload._cached_filenames = []
def assertFileFound(self, filename):
self.clear_autoreload_caches()
# Test uncached access
self.assertIn(npath(filename), autoreload.gen_filenames())
# Test cached access
self.assertIn(npath(filename), autoreload.gen_filenames())
def assertFileNotFound(self, filename):
self.clear_autoreload_caches()
# Test uncached access
self.assertNotIn(npath(filename), autoreload.gen_filenames())
# Test cached access
self.assertNotIn(npath(filename), autoreload.gen_filenames())
def assertFileFoundOnlyNew(self, filename):
self.clear_autoreload_caches()
# Test uncached access
self.assertIn(npath(filename), autoreload.gen_filenames(only_new=True))
# Test cached access
self.assertNotIn(npath(filename), autoreload.gen_filenames(only_new=True))
def test_django_locales(self):
"""
Test that gen_filenames() yields the built-in Django locale files.
"""
django_dir = os.path.join(os.path.dirname(conf.__file__), 'locale')
django_mo = os.path.join(django_dir, 'nl', 'LC_MESSAGES', 'django.mo')
self.assertFileFound(django_mo)
@override_settings(LOCALE_PATHS=[LOCALE_PATH])
def test_locale_paths_setting(self):
"""
Test that gen_filenames also yields from LOCALE_PATHS locales.
"""
locale_paths_mo = os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo')
self.assertFileFound(locale_paths_mo)
@override_settings(INSTALLED_APPS=[])
def test_project_root_locale(self):
"""
Test that gen_filenames also yields from the current directory (project
root).
"""
old_cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
current_dir = os.path.join(os.path.dirname(__file__), 'locale')
current_dir_mo = os.path.join(current_dir, 'nl', 'LC_MESSAGES', 'django.mo')
try:
self.assertFileFound(current_dir_mo)
finally:
os.chdir(old_cwd)
@override_settings(INSTALLED_APPS=['django.contrib.admin'])
def test_app_locales(self):
"""
Test that gen_filenames also yields from locale dirs in installed apps.
"""
admin_dir = os.path.join(os.path.dirname(admin.__file__), 'locale')
admin_mo = os.path.join(admin_dir, 'nl', 'LC_MESSAGES', 'django.mo')
self.assertFileFound(admin_mo)
@override_settings(USE_I18N=False)
def test_no_i18n(self):
"""
If i18n machinery is disabled, there is no need for watching the
locale files.
"""
django_dir = os.path.join(os.path.dirname(conf.__file__), 'locale')
django_mo = os.path.join(django_dir, 'nl', 'LC_MESSAGES', 'django.mo')
self.assertFileNotFound(django_mo)
def test_paths_are_native_strings(self):
for filename in autoreload.gen_filenames():
self.assertIsInstance(filename, str)
def test_only_new_files(self):
"""
When calling a second time gen_filenames with only_new = True, only
files from newly loaded modules should be given.
"""
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, 'test_only_new_module.py')
self.addCleanup(shutil.rmtree, dirname)
with open(filename, 'w'):
pass
# Test uncached access
self.clear_autoreload_caches()
filenames = set(autoreload.gen_filenames(only_new=True))
filenames_reference = set(autoreload.gen_filenames())
self.assertEqual(filenames, filenames_reference)
# Test cached access: no changes
filenames = set(autoreload.gen_filenames(only_new=True))
self.assertEqual(filenames, set())
# Test cached access: add a module
with extend_sys_path(dirname):
import_module('test_only_new_module')
filenames = set(autoreload.gen_filenames(only_new=True))
self.assertEqual(filenames, {npath(filename)})
def test_deleted_removed(self):
"""
When a file is deleted, gen_filenames() no longer returns it.
"""
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, 'test_deleted_removed_module.py')
self.addCleanup(shutil.rmtree, dirname)
with open(filename, 'w'):
pass
with extend_sys_path(dirname):
import_module('test_deleted_removed_module')
self.assertFileFound(filename)
os.unlink(filename)
self.assertFileNotFound(filename)
def test_check_errors(self):
"""
When a file containing an error is imported in a function wrapped by
check_errors(), gen_filenames() returns it.
"""
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, 'test_syntax_error.py')
self.addCleanup(shutil.rmtree, dirname)
with open(filename, 'w') as f:
f.write("Ceci n'est pas du Python.")
with extend_sys_path(dirname):
with self.assertRaises(SyntaxError):
autoreload.check_errors(import_module)('test_syntax_error')
self.assertFileFound(filename)
def test_check_errors_only_new(self):
"""
When a file containing an error is imported in a function wrapped by
check_errors(), gen_filenames(only_new=True) returns it.
"""
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, 'test_syntax_error.py')
self.addCleanup(shutil.rmtree, dirname)
with open(filename, 'w') as f:
f.write("Ceci n'est pas du Python.")
with extend_sys_path(dirname):
with self.assertRaises(SyntaxError):
autoreload.check_errors(import_module)('test_syntax_error')
self.assertFileFoundOnlyNew(filename)
def test_check_errors_catches_all_exceptions(self):
"""
Since Python may raise arbitrary exceptions when importing code,
check_errors() must catch Exception, not just some subclasses.
"""
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, 'test_exception.py')
self.addCleanup(shutil.rmtree, dirname)
with open(filename, 'w') as f:
f.write("raise Exception")
with extend_sys_path(dirname):
with self.assertRaises(Exception):
autoreload.check_errors(import_module)('test_exception')
self.assertFileFound(filename)
|
bsd-3-clause
|
dudonwai/dudonsblog
|
Lib/encodings/punycode.py
|
586
|
6813
|
# -*- coding: iso-8859-1 -*-
""" Codec for the Punicode encoding, as specified in RFC 3492
Written by Martin v. Löwis.
"""
import codecs
##################### Encoding #####################################
def segregate(str):
"""3.1 Basic code point segregation"""
base = []
extended = {}
for c in str:
if ord(c) < 128:
base.append(c)
else:
extended[c] = 1
extended = extended.keys()
extended.sort()
return "".join(base).encode("ascii"),extended
def selective_len(str, max):
"""Return the length of str, considering only characters below max."""
res = 0
for c in str:
if ord(c) < max:
res += 1
return res
def selective_find(str, char, index, pos):
"""Return a pair (index, pos), indicating the next occurrence of
char in str. index is the position of the character considering
only ordinals up to and including char, and pos is the position in
the full string. index/pos is the starting position in the full
string."""
l = len(str)
while 1:
pos += 1
if pos == l:
return (-1, -1)
c = str[pos]
if c == char:
return index+1, pos
elif c < char:
index += 1
def insertion_unsort(str, extended):
"""3.2 Insertion unsort coding"""
oldchar = 0x80
result = []
oldindex = -1
for c in extended:
index = pos = -1
char = ord(c)
curlen = selective_len(str, char)
delta = (curlen+1) * (char - oldchar)
while 1:
index,pos = selective_find(str,c,index,pos)
if index == -1:
break
delta += index - oldindex
result.append(delta-1)
oldindex = index
delta = 0
oldchar = char
return result
def T(j, bias):
# Punycode parameters: tmin = 1, tmax = 26, base = 36
res = 36 * (j + 1) - bias
if res < 1: return 1
if res > 26: return 26
return res
digits = "abcdefghijklmnopqrstuvwxyz0123456789"
def generate_generalized_integer(N, bias):
"""3.3 Generalized variable-length integers"""
result = []
j = 0
while 1:
t = T(j, bias)
if N < t:
result.append(digits[N])
return result
result.append(digits[t + ((N - t) % (36 - t))])
N = (N - t) // (36 - t)
j += 1
def adapt(delta, first, numchars):
if first:
delta //= 700
else:
delta //= 2
delta += delta // numchars
# ((base - tmin) * tmax) // 2 == 455
divisions = 0
while delta > 455:
delta = delta // 35 # base - tmin
divisions += 36
bias = divisions + (36 * delta // (delta + 38))
return bias
def generate_integers(baselen, deltas):
"""3.4 Bias adaptation"""
# Punycode parameters: initial bias = 72, damp = 700, skew = 38
result = []
bias = 72
for points, delta in enumerate(deltas):
s = generate_generalized_integer(delta, bias)
result.extend(s)
bias = adapt(delta, points==0, baselen+points+1)
return "".join(result)
def punycode_encode(text):
base, extended = segregate(text)
base = base.encode("ascii")
deltas = insertion_unsort(text, extended)
extended = generate_integers(len(base), deltas)
if base:
return base + "-" + extended
return extended
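# Worked example (illustrative, Python 2 semantics): u"b\xfccher" ("bücher")
# segregates into the base "bcher" plus one extended code point, which the
# generalized integers encode as:
#
#   punycode_encode(u"b\xfccher")  # -> "bcher-kva" (cf. IDNA "xn--bcher-kva")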
##################### Decoding #####################################
def decode_generalized_number(extended, extpos, bias, errors):
"""3.3 Generalized variable-length integers"""
result = 0
w = 1
j = 0
while 1:
try:
char = ord(extended[extpos])
except IndexError:
if errors == "strict":
raise UnicodeError, "incomplete punicode string"
return extpos + 1, None
extpos += 1
if 0x41 <= char <= 0x5A: # A-Z
digit = char - 0x41
elif 0x30 <= char <= 0x39:
digit = char - 22 # 0x30-26
elif errors == "strict":
raise UnicodeError("Invalid extended code point '%s'"
% extended[extpos])
else:
return extpos, None
t = T(j, bias)
result += digit * w
if digit < t:
return extpos, result
w = w * (36 - t)
j += 1
def insertion_sort(base, extended, errors):
"""3.2 Insertion unsort coding"""
char = 0x80
pos = -1
bias = 72
extpos = 0
while extpos < len(extended):
newpos, delta = decode_generalized_number(extended, extpos,
bias, errors)
if delta is None:
# There was an error in decoding. We can't continue because
# synchronization is lost.
return base
pos += delta+1
char += pos // (len(base) + 1)
if char > 0x10FFFF:
if errors == "strict":
raise UnicodeError, ("Invalid character U+%x" % char)
char = ord('?')
pos = pos % (len(base) + 1)
base = base[:pos] + unichr(char) + base[pos:]
bias = adapt(delta, (extpos == 0), len(base))
extpos = newpos
return base
def punycode_decode(text, errors):
pos = text.rfind("-")
if pos == -1:
base = ""
extended = text
else:
base = text[:pos]
extended = text[pos+1:]
base = unicode(base, "ascii", errors)
extended = extended.upper()
return insertion_sort(base, extended, errors)
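# Worked example (illustrative): decoding reverses the transformation above.
#
#   punycode_decode("bcher-kva", "strict")  # -> u"b\xfccher"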
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
res = punycode_encode(input)
return res, len(input)
def decode(self,input,errors='strict'):
if errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+errors
res = punycode_decode(input, errors)
return res, len(input)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return punycode_encode(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
if self.errors not in ('strict', 'replace', 'ignore'):
raise UnicodeError, "Unsupported error handling "+self.errors
return punycode_decode(input, self.errors)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='punycode',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
|
mit
|
etherkit/OpenBeacon2
|
client/win/venv/Lib/site-packages/pip/_vendor/lockfile/symlinklockfile.py
|
536
|
2616
|
from __future__ import absolute_import
import os
import time
from . import (LockBase, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class SymlinkLockFile(LockBase):
"""Lock access to a file using symlink(2)."""
def __init__(self, path, threaded=True, timeout=None):
# super(SymlinkLockFile, self).__init__(...)
LockBase.__init__(self, path, threaded, timeout)
# split it back!
self.unique_name = os.path.split(self.unique_name)[1]
def acquire(self, timeout=None):
# Hopefully unnecessary for symlink.
# try:
# open(self.unique_name, "wb").close()
# except IOError:
# raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a symbolic link to it.
try:
os.symlink(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
if self.i_am_locking():
                    # Linked to our unique name. Proceed.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout / 10 if timeout is not None else 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.islink(self.lock_file)
def i_am_locking(self):
return (os.path.islink(self.lock_file)
and os.readlink(self.lock_file) == self.unique_name)
def break_lock(self):
if os.path.islink(self.lock_file): # exists && link
os.unlink(self.lock_file)
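if __name__ == "__main__":
    # Minimal smoke test -- an illustrative sketch, not part of the library.
    # POSIX-only, since the lock is implemented with symlink(2).
    import tempfile
    target = os.path.join(tempfile.mkdtemp(), "demo.txt")
    lock = SymlinkLockFile(target, timeout=1)
    lock.acquire()
    assert lock.is_locked() and lock.i_am_locking()
    lock.release()
    assert not lock.is_locked()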
|
gpl-3.0
|
lizardsystem/flooding
|
flooding_base/static/weblib/OpenLayers-2.12-rc7/tools/oldot.py
|
295
|
1359
|
import re
import os
def run():
sourceDirectory = "../lib/OpenLayers"
allFiles = []
SUFFIX_JAVASCRIPT = ".js"
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
data = open(os.path.join(sourceDirectory, filepath)).read()
parents = re.search("OpenLayers.Class\((.*?){", data,
re.DOTALL)
if parents:
parents = [x.strip() for x in parents.group(1).strip().strip(",").split(",")]
else:
parents = []
cls = "OpenLayers.%s" % filepath.strip(".js").replace("/", ".")
allFiles.append([cls, parents])
return allFiles
print """
digraph name {
fontname = "Helvetica"
fontsize = 8
K = 0.6
node [
fontname = "Helvetica"
fontsize = 8
shape = "plaintext"
]
"""
for i in run():
print i[0].replace(".", "_")
for item in i[1]:
if not item: continue
print "%s -> %s" % (i[0].replace(".","_"), item.replace(".", "_"))
print "; "
print """}"""
|
gpl-3.0
|
pkess/beets
|
test/test_the.py
|
10
|
2781
|
# -*- coding: utf-8 -*-
"""Tests for the 'the' plugin"""
from __future__ import division, absolute_import, print_function
import unittest
from test import _common
from beets import config
from beetsplug.the import ThePlugin, PATTERN_A, PATTERN_THE, FORMAT
class ThePluginTest(_common.TestCase):
def test_unthe_with_default_patterns(self):
self.assertEqual(ThePlugin().unthe(u'', PATTERN_THE), '')
self.assertEqual(ThePlugin().unthe(u'The Something', PATTERN_THE),
u'Something, The')
self.assertEqual(ThePlugin().unthe(u'The The', PATTERN_THE),
u'The, The')
self.assertEqual(ThePlugin().unthe(u'The The X', PATTERN_THE),
u'The X, The')
self.assertEqual(ThePlugin().unthe(u'the The', PATTERN_THE),
u'The, the')
self.assertEqual(ThePlugin().unthe(u'Protected The', PATTERN_THE),
u'Protected The')
self.assertEqual(ThePlugin().unthe(u'A Boy', PATTERN_A),
u'Boy, A')
self.assertEqual(ThePlugin().unthe(u'a girl', PATTERN_A),
u'girl, a')
self.assertEqual(ThePlugin().unthe(u'An Apple', PATTERN_A),
u'Apple, An')
self.assertEqual(ThePlugin().unthe(u'An A Thing', PATTERN_A),
u'A Thing, An')
self.assertEqual(ThePlugin().unthe(u'the An Arse', PATTERN_A),
u'the An Arse')
def test_unthe_with_strip(self):
config['the']['strip'] = True
self.assertEqual(ThePlugin().unthe(u'The Something', PATTERN_THE),
u'Something')
self.assertEqual(ThePlugin().unthe(u'An A', PATTERN_A), u'A')
def test_template_function_with_defaults(self):
ThePlugin().patterns = [PATTERN_THE, PATTERN_A]
self.assertEqual(ThePlugin().the_template_func(u'The The'),
u'The, The')
self.assertEqual(ThePlugin().the_template_func(u'An A'), u'A, An')
def test_custom_pattern(self):
config['the']['patterns'] = [u'^test\s']
config['the']['format'] = FORMAT
self.assertEqual(ThePlugin().the_template_func(u'test passed'),
u'passed, test')
def test_custom_format(self):
config['the']['patterns'] = [PATTERN_THE, PATTERN_A]
config['the']['format'] = u'{1} ({0})'
self.assertEqual(ThePlugin().the_template_func(u'The A'), u'The (A)')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
mit
|
jvkops/django
|
tests/generic_views/models.py
|
382
|
1631
|
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import QuerySet
from django.db.models.manager import BaseManager
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Artist(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ['name']
verbose_name = 'professional artist'
verbose_name_plural = 'professional artists'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('artist_detail', kwargs={'pk': self.id})
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class DoesNotExistQuerySet(QuerySet):
def get(self, *args, **kwargs):
raise Author.DoesNotExist
DoesNotExistBookManager = BaseManager.from_queryset(DoesNotExistQuerySet)
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=300)
slug = models.SlugField()
pages = models.IntegerField()
authors = models.ManyToManyField(Author)
pubdate = models.DateField()
objects = models.Manager()
does_not_exist = DoesNotExistBookManager()
class Meta:
ordering = ['-pubdate']
def __str__(self):
return self.name
class Page(models.Model):
content = models.TextField()
template = models.CharField(max_length=300)
class BookSigning(models.Model):
event_date = models.DateTimeField()
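# Usage sketch (hypothetical; mirrors how the generic-view tests use these
# models): the `does_not_exist` manager forces the "missing object" code
# path without touching the database.
#
#     Book.does_not_exist.get(pk=1)   # always raises Author.DoesNotExist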
|
bsd-3-clause
|
tysonholub/twilio-python
|
twilio/rest/sync/v1/service/sync_list/sync_list_item.py
|
1
|
20321
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SyncListItemList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, service_sid, list_sid):
"""
Initialize the SyncListItemList
:param Version version: Version that contains the resource
:param service_sid: The SID of the Sync Service that the resource is associated with
:param list_sid: The SID of the Sync List that contains the List Item
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemList
"""
super(SyncListItemList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'list_sid': list_sid, }
self._uri = '/Services/{service_sid}/Lists/{list_sid}/Items'.format(**self._solution)
def create(self, data, ttl=values.unset, item_ttl=values.unset,
collection_ttl=values.unset):
"""
Create a new SyncListItemInstance
:param dict data: A JSON string that represents an arbitrary, schema-less object that the List Item stores
:param unicode ttl: An alias for item_ttl
:param unicode item_ttl: How long, in seconds, before the List Item expires
:param unicode collection_ttl: How long, in seconds, before the List Item's parent Sync List expires
:returns: Newly created SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
"""
data = values.of({
'Data': serialize.object(data),
'Ttl': ttl,
'ItemTtl': item_ttl,
'CollectionTtl': collection_ttl,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return SyncListItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
)
def stream(self, order=values.unset, from_=values.unset, bounds=values.unset,
limit=None, page_size=None):
"""
Streams SyncListItemInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param SyncListItemInstance.QueryResultOrder order: The order to return the List Items
:param unicode from_: The index of the first Sync List Item resource to read
:param SyncListItemInstance.QueryFromBoundType bounds: Whether to include the List Item referenced by the from parameter
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(order=order, from_=from_, bounds=bounds, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, order=values.unset, from_=values.unset, bounds=values.unset,
limit=None, page_size=None):
"""
Lists SyncListItemInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param SyncListItemInstance.QueryResultOrder order: The order to return the List Items
:param unicode from_: The index of the first Sync List Item resource to read
:param SyncListItemInstance.QueryFromBoundType bounds: Whether to include the List Item referenced by the from parameter
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List of up to limit results
:rtype: list[twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance]
"""
return list(self.stream(order=order, from_=from_, bounds=bounds, limit=limit, page_size=page_size, ))
def page(self, order=values.unset, from_=values.unset, bounds=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of SyncListItemInstance records from the API.
Request is executed immediately
:param SyncListItemInstance.QueryResultOrder order: The order to return the List Items
:param unicode from_: The index of the first Sync List Item resource to read
:param SyncListItemInstance.QueryFromBoundType bounds: Whether to include the List Item referenced by the from parameter
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemPage
"""
params = values.of({
'Order': order,
'From': from_,
'Bounds': bounds,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SyncListItemPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SyncListItemInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SyncListItemPage(self._version, response, self._solution)
def get(self, index):
"""
Constructs a SyncListItemContext
:param index: The index of the Sync List Item resource to fetch
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemContext
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemContext
"""
return SyncListItemContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=index,
)
def __call__(self, index):
"""
Constructs a SyncListItemContext
:param index: The index of the Sync List Item resource to fetch
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemContext
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemContext
"""
return SyncListItemContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=index,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Sync.V1.SyncListItemList>'
class SyncListItemPage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the SyncListItemPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param service_sid: The SID of the Sync Service that the resource is associated with
:param list_sid: The SID of the Sync List that contains the List Item
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemPage
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemPage
"""
super(SyncListItemPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SyncListItemInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
"""
return SyncListItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Sync.V1.SyncListItemPage>'
class SyncListItemContext(InstanceContext):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, service_sid, list_sid, index):
"""
Initialize the SyncListItemContext
:param Version version: Version that contains the resource
:param service_sid: The SID of the Sync Service with the Sync List Item resource to fetch
:param list_sid: The SID of the Sync List with the Sync List Item resource to fetch
:param index: The index of the Sync List Item resource to fetch
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemContext
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemContext
"""
super(SyncListItemContext, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'list_sid': list_sid, 'index': index, }
self._uri = '/Services/{service_sid}/Lists/{list_sid}/Items/{index}'.format(**self._solution)
def fetch(self):
"""
Fetch a SyncListItemInstance
:returns: Fetched SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SyncListItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=self._solution['index'],
)
def delete(self):
"""
Deletes the SyncListItemInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, data=values.unset, ttl=values.unset, item_ttl=values.unset,
collection_ttl=values.unset):
"""
Update the SyncListItemInstance
:param dict data: A JSON string that represents an arbitrary, schema-less object that the List Item stores
:param unicode ttl: An alias for item_ttl
:param unicode item_ttl: How long, in seconds, before the List Item expires
:param unicode collection_ttl: How long, in seconds, before the List Item's parent Sync List expires
:returns: Updated SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
"""
data = values.of({
'Data': serialize.object(data),
'Ttl': ttl,
'ItemTtl': item_ttl,
'CollectionTtl': collection_ttl,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return SyncListItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=self._solution['index'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Sync.V1.SyncListItemContext {}>'.format(context)
class SyncListItemInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
class QueryResultOrder(object):
ASC = "asc"
DESC = "desc"
class QueryFromBoundType(object):
INCLUSIVE = "inclusive"
EXCLUSIVE = "exclusive"
def __init__(self, version, payload, service_sid, list_sid, index=None):
"""
Initialize the SyncListItemInstance
:returns: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
"""
super(SyncListItemInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'index': deserialize.integer(payload.get('index')),
'account_sid': payload.get('account_sid'),
'service_sid': payload.get('service_sid'),
'list_sid': payload.get('list_sid'),
'url': payload.get('url'),
'revision': payload.get('revision'),
'data': payload.get('data'),
'date_expires': deserialize.iso8601_datetime(payload.get('date_expires')),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'created_by': payload.get('created_by'),
}
# Context
self._context = None
self._solution = {
'service_sid': service_sid,
'list_sid': list_sid,
'index': index or self._properties['index'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListItemContext for this SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemContext
"""
if self._context is None:
self._context = SyncListItemContext(
self._version,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
index=self._solution['index'],
)
return self._context
@property
def index(self):
"""
:returns: The automatically generated index of the List Item
:rtype: unicode
"""
return self._properties['index']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: The SID of the Sync Service that the resource is associated with
:rtype: unicode
"""
return self._properties['service_sid']
@property
def list_sid(self):
"""
:returns: The SID of the Sync List that contains the List Item
:rtype: unicode
"""
return self._properties['list_sid']
@property
def url(self):
"""
:returns: The absolute URL of the List Item resource
:rtype: unicode
"""
return self._properties['url']
@property
def revision(self):
"""
:returns: The current revision of the item, represented as a string
:rtype: unicode
"""
return self._properties['revision']
@property
def data(self):
"""
:returns: An arbitrary, schema-less object that the List Item stores
:rtype: dict
"""
return self._properties['data']
@property
def date_expires(self):
"""
:returns: The ISO 8601 date and time in GMT when the List Item expires
:rtype: datetime
"""
return self._properties['date_expires']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def created_by(self):
"""
:returns: The identity of the List Item's creator
:rtype: unicode
"""
return self._properties['created_by']
def fetch(self):
"""
Fetch a SyncListItemInstance
:returns: Fetched SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the SyncListItemInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, data=values.unset, ttl=values.unset, item_ttl=values.unset,
collection_ttl=values.unset):
"""
Update the SyncListItemInstance
:param dict data: A JSON string that represents an arbitrary, schema-less object that the List Item stores
:param unicode ttl: An alias for item_ttl
:param unicode item_ttl: How long, in seconds, before the List Item expires
:param unicode collection_ttl: How long, in seconds, before the List Item's parent Sync List expires
:returns: Updated SyncListItemInstance
:rtype: twilio.rest.sync.v1.service.sync_list.sync_list_item.SyncListItemInstance
"""
return self._proxy.update(data=data, ttl=ttl, item_ttl=item_ttl, collection_ttl=collection_ttl, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Sync.V1.SyncListItemInstance {}>'.format(context)
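# Usage sketch (illustrative; the SIDs are placeholders, and the usual entry
# point is twilio.rest.Client rather than instantiating these classes
# directly):
#
#     from twilio.rest import Client
#     client = Client(account_sid, auth_token)
#     item = client.sync \
#         .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#         .sync_lists('ESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#         .sync_list_items \
#         .create(data={'color': 'blue'}, item_ttl=3600)
#     print(item.index, item.revision)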
|
mit
|
rht/zulip
|
zerver/webhooks/freshdesk/view.py
|
1
|
6058
|
"""Webhooks for external integrations."""
import logging
from typing import Any, Dict, List
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import authenticated_rest_api_view
from zerver.lib.email_notifications import convert_html_to_markdown
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
NOTE_TEMPLATE = "{name} <{email}> added a {note_type} note to [ticket #{ticket_id}]({ticket_url})."
PROPERTY_CHANGE_TEMPLATE = """
{name} <{email}> updated [ticket #{ticket_id}]({ticket_url}):
* **{property_name}**: {old} -> {new}
""".strip()
TICKET_CREATION_TEMPLATE = """
{name} <{email}> created [ticket #{ticket_id}]({ticket_url}):
``` quote
{description}
```
* **Type**: {type}
* **Priority**: {priority}
* **Status**: {status}
""".strip()
class TicketDict(Dict[str, Any]):
"""
A helper class to turn a dictionary with ticket information into
an object where each of the keys is an attribute for easy access.
"""
def __getattr__(self, field: str) -> Any:
if "_" in field:
return self.get(field)
else:
return self.get("ticket_" + field)
def property_name(property: str, index: int) -> str:
"""The Freshdesk API is currently pretty broken: statuses are customizable
but the API will only tell you the number associated with the status, not
the name. While we engage the Freshdesk developers about exposing this
information through the API, since only FlightCar uses this integration,
hardcode their statuses.
"""
statuses = ["", "", "Open", "Pending", "Resolved", "Closed",
"Waiting on Customer", "Job Application", "Monthly"]
priorities = ["", "Low", "Medium", "High", "Urgent"]
name = ""
if property == "status":
name = statuses[index] if index < len(statuses) else str(index)
elif property == "priority":
name = priorities[index] if index < len(priorities) else str(index)
return name
def parse_freshdesk_event(event_string: str) -> List[str]:
"""These are always of the form "{ticket_action:created}" or
"{status:{from:4,to:6}}". Note the lack of string quoting: this isn't
valid JSON so we have to parse it ourselves.
"""
data = event_string.replace("{", "").replace("}", "").replace(",", ":").split(":")
if len(data) == 2:
# This is a simple ticket action event, like
# {ticket_action:created}.
return data
else:
# This is a property change event, like {status:{from:4,to:6}}. Pull out
# the property, from, and to states.
property, _, from_state, _, to_state = data
return [property, property_name(property, int(from_state)),
property_name(property, int(to_state))]
def format_freshdesk_note_message(ticket: TicketDict, event_info: List[str]) -> str:
"""There are public (visible to customers) and private note types."""
note_type = event_info[1]
content = NOTE_TEMPLATE.format(
name=ticket.requester_name,
email=ticket.requester_email,
note_type=note_type,
ticket_id=ticket.id,
ticket_url=ticket.url
)
return content
def format_freshdesk_property_change_message(ticket: TicketDict, event_info: List[str]) -> str:
"""Freshdesk will only tell us the first event to match our webhook
configuration, so if we change multiple properties, we only get the before
and after data for the first one.
"""
content = PROPERTY_CHANGE_TEMPLATE.format(
name=ticket.requester_name,
email=ticket.requester_email,
ticket_id=ticket.id,
ticket_url=ticket.url,
property_name=event_info[0].capitalize(),
old=event_info[1],
new=event_info[2]
)
return content
def format_freshdesk_ticket_creation_message(ticket: TicketDict) -> str:
"""They send us the description as HTML."""
cleaned_description = convert_html_to_markdown(ticket.description)
content = TICKET_CREATION_TEMPLATE.format(
name=ticket.requester_name,
email=ticket.requester_email,
ticket_id=ticket.id,
ticket_url=ticket.url,
description=cleaned_description,
type=ticket.type,
priority=ticket.priority,
status=ticket.status
)
return content
@authenticated_rest_api_view(webhook_client_name="Freshdesk")
@has_request_variables
def api_freshdesk_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
ticket_data = payload["freshdesk_webhook"]
required_keys = [
"triggered_event", "ticket_id", "ticket_url", "ticket_type",
"ticket_subject", "ticket_description", "ticket_status",
"ticket_priority", "requester_name", "requester_email",
]
for key in required_keys:
if ticket_data.get(key) is None:
logging.warning("Freshdesk webhook error. Payload was:")
logging.warning(request.body)
return json_error(_("Missing key %s in JSON") % (key,))
ticket = TicketDict(ticket_data)
subject = "#{ticket_id}: {ticket_subject}".format(
ticket_id=ticket.id,
ticket_subject=ticket.subject
)
event_info = parse_freshdesk_event(ticket.triggered_event)
if event_info[1] == "created":
content = format_freshdesk_ticket_creation_message(ticket)
elif event_info[0] == "note_type":
content = format_freshdesk_note_message(ticket, event_info)
elif event_info[0] in ("status", "priority"):
content = format_freshdesk_property_change_message(ticket, event_info)
else:
        # Not an event we know how to handle; do nothing.
return json_success()
check_send_webhook_message(request, user_profile, subject, content)
return json_success()
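# Worked examples for parse_freshdesk_event above (status names come from
# the hardcoded table in property_name):
#
#     >>> parse_freshdesk_event("{ticket_action:created}")
#     ['ticket_action', 'created']
#     >>> parse_freshdesk_event("{status:{from:4,to:6}}")
#     ['status', 'Resolved', 'Waiting on Customer']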
|
apache-2.0
|
hlange/LogSoCR
|
pysc/usi/systemc.py
|
1
|
1548
|
from __future__ import print_function
import logging
import usi.api.systemc as api
# Renaming time constants for easy reuse
FS = api.SC_FS
PS = api.SC_PS
NS = api.SC_NS
US = api.SC_US
MS = api.SC_MS
SEC = api.SC_SEC
"""Time constants units"""
TIME_UNITS = {
FS:"fs",
PS:"ps",
NS:"ns",
US:"us",
MS:"ms",
SEC:"s"
}
logger = logging.getLogger(__name__)
def start(*k, **kw):
if api.is_running():
if hasattr(api, "start"):
api.start(*k)
else:
logger.warning("sc_start is not implemented")
from usi import shell
if shell.is_running():
shell.stop()
def stop(*k, **kw):
if api.is_running():
api.stop()
from usi import shell
if shell.is_running():
shell.stop()
def pause(*k, **kw):
from usi import shell
if not shell.is_running():
api.pause()
simulation_time = api.simulation_time
delta_count = api.delta_count
set_verbosity = api.set_verbosity
#spawn = api.spawn
is_running = api.is_running
get_top_level_objects = api.get_top_level_objects
def wait(obj, tu=None):
"""
if obj is event or event tree,
call obj.wait(); else it is a scalar
"""
#from pysc import thread_control
if hasattr(obj, "wait"):
obj.wait()
return
    if tu is None:
api.wait(obj)
else:
api.wait(obj, tu)
# support for thread manipulation: pause, reset, kill, etc
#thread_control()
# Utilities
def time(tu=None):
    if tu is None: tu = NS
return "time=%d (delta=%d)" % (simulation_time(tu), delta_count())
|
agpl-3.0
|
waytai/odoo
|
addons/hr_timesheet_invoice/report/__init__.py
|
433
|
1136
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_profit
import report_analytic
import hr_timesheet_invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
asidev/aybu-manager
|
aybu/manager/activity_log/packages.py
|
1
|
2606
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010-2012 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pkg_resources
import shlex
import shutil
import subprocess
from . action import Action
__all__ = ['install', 'uninstall']
class Pip(Action):
def __init__(self, path, virtualenv, package_name):
super(Pip, self).__init__()
self.python = os.path.join(virtualenv, 'bin', 'python')
self.script = pkg_resources.resource_filename('aybu.manager.utils',
'pipwrapper.py')
self.virtualenv = virtualenv
self.path = path
self.package_name = package_name
def install(self):
command = "{} {} install -e {}".format(self.python, self.script,
self.path)
self.log.info("installing from %s", self.path)
self.log.debug(command)
try:
subprocess.check_output(shlex.split(command))
except subprocess.CalledProcessError as e:
self.log.error(e.output)
raise e
def uninstall(self):
cmd = "{} {} uninstall -y {}".format(self.python, self.script,
self.package_name)
self.log.info("uninstall: %s", self.package_name)
subprocess.check_call(shlex.split(cmd))
# remove egg_info directory
egginfo_dir = "{}.egg-info".format(self.package_name.replace("-", "_"))
egginfo_dir = os.path.join(self.path, egginfo_dir)
if os.path.isdir(egginfo_dir):
shutil.rmtree(egginfo_dir)
def commit(self):
pass
class install(Pip):
def __init__(self, path, virtualenv, package_name):
super(install, self).__init__(path, virtualenv, package_name)
self.install()
def rollback(self):
self.uninstall()
class uninstall(Pip):
def __init__(self, path, virtualenv, package_name):
super(uninstall, self).__init__(path, virtualenv, package_name)
self.uninstall()
def rollback(self):
self.install()
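# Usage sketch (hypothetical paths and package name): each action runs its
# pip step eagerly in __init__ and keeps rollback() so the activity log can
# undo it.
#
#     action = install('/srv/pkgs/aybu-website', '/srv/venv', 'aybu-website')
#     ...
#     action.rollback()    # pip-uninstalls the package again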
|
apache-2.0
|
brchiu/tensorflow
|
tensorflow/python/saved_model/tag_constants.py
|
10
|
1881
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common tags used for graphs in SavedModel.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# Tag for the `serving` graph.
SERVING = "serve"
tf_export(
"saved_model.SERVING",
v1=["saved_model.SERVING",
"saved_model.tag_constants.SERVING"]).export_constant(
__name__, "SERVING")
# Tag for the `training` graph.
TRAINING = "train"
tf_export(
"saved_model.TRANING",
v1=["saved_model.TRAINING",
"saved_model.tag_constants.TRAINING"]).export_constant(
__name__, "TRAINING")
# Tag for the `eval` graph. Not exported while the export logic is in contrib.
EVAL = "eval"
# Tag for the `gpu` graph.
GPU = "gpu"
tf_export(
"saved_model.GPU", v1=["saved_model.GPU",
"saved_model.tag_constants.GPU"]).export_constant(
__name__, "GPU")
# Tag for the `tpu` graph.
TPU = "tpu"
tf_export(
"saved_model.TPU", v1=["saved_model.TPU",
"saved_model.tag_constants.TPU"]).export_constant(
__name__, "TPU")
|
apache-2.0
|
Thhhza/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_font01.py
|
8
|
1851
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_font01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'bar'})
chart.axis_ids = [43945344, 45705856]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_title({'name': 'Title'})
chart.set_x_axis({
'name': 'XXX',
'num_font': {'size': 11, 'bold': 1, 'italic': 1}
})
chart.set_y_axis({
'name': 'YYY',
'num_font': {'size': 9, 'bold': 0, 'italic': True}
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
bsd-2-clause
|
ahmed-mahran/hue
|
desktop/core/ext-py/Django-1.6.10/tests/template_tests/test_smartif.py
|
150
|
2190
|
from django.template.smartif import IfParser
from django.utils import unittest
class SmartIfTests(unittest.TestCase):
def assertCalcEqual(self, expected, tokens):
self.assertEqual(expected, IfParser(tokens).parse().eval({}))
# We only test things here that are difficult to test elsewhere
# Many other tests are found in the main tests for builtin template tags
# Test parsing via the printed parse tree
def test_not(self):
var = IfParser(["not", False]).parse()
self.assertEqual("(not (literal False))", repr(var))
self.assertTrue(var.eval({}))
self.assertFalse(IfParser(["not", True]).parse().eval({}))
def test_or(self):
var = IfParser([True, "or", False]).parse()
self.assertEqual("(or (literal True) (literal False))", repr(var))
self.assertTrue(var.eval({}))
def test_in(self):
list_ = [1,2,3]
self.assertCalcEqual(True, [1, 'in', list_])
self.assertCalcEqual(False, [1, 'in', None])
self.assertCalcEqual(False, [None, 'in', list_])
def test_not_in(self):
list_ = [1,2,3]
self.assertCalcEqual(False, [1, 'not', 'in', list_])
self.assertCalcEqual(True, [4, 'not', 'in', list_])
self.assertCalcEqual(False, [1, 'not', 'in', None])
self.assertCalcEqual(True, [None, 'not', 'in', list_])
def test_precedence(self):
# (False and False) or True == True <- we want this one, like Python
# False and (False or True) == False
self.assertCalcEqual(True, [False, 'and', False, 'or', True])
# True or (False and False) == True <- we want this one, like Python
# (True or False) and False == False
self.assertCalcEqual(True, [True, 'or', False, 'and', False])
# (1 or 1) == 2 -> False
# 1 or (1 == 2) -> True <- we want this one
self.assertCalcEqual(True, [1, 'or', 1, '==', 2])
self.assertCalcEqual(True, [True, '==', True, 'or', True, '==', False])
self.assertEqual("(or (and (== (literal 1) (literal 2)) (literal 3)) (literal 4))",
repr(IfParser([1, '==', 2, 'and', 3, 'or', 4]).parse()))
|
apache-2.0
|
jeffreyliu3230/osf.io
|
tasks.py
|
9
|
23940
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from invoke import task, run
from website import settings
logging.getLogger('invoke').setLevel(logging.CRITICAL)
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def get_bin_path():
"""Get parent path of current python binary.
"""
return os.path.dirname(sys.executable)
def bin_prefix(cmd):
"""Prefix command with current binary path.
"""
return os.path.join(get_bin_path(), cmd)
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, live=False):
"""Run the API server."""
cmd = 'python manage.py runserver {}'.format(port)
if live:
cmd += ' livereload'
run(cmd, echo=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
Available variables:
{context}
"""
def make_shell_context():
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
app = init_app()
context = {
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell():
context = make_shell_context()
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context)
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
platform = str(sys.platform).lower()
config = platform_configs.get(platform)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
    the location given as its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
    `invoke mongorestore {path}/{settings.DB_NAME}`, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_host=None, db_port=None, db_name=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_host:
os.environ['SHAREJS_DB_HOST'] = db_host
if db_port:
os.environ['SHAREJS_DB_PORT'] = db_port
if db_name:
os.environ['SHAREJS_DB_NAME'] = db_name
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug"):
"""Run the Celery process."""
cmd = 'celery worker -A framework.tasks -l {0}'.format(level)
run(bin_prefix(cmd))
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
import platform
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
"""Migrate the search-enabled models."""
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def rebuild_search():
"""Delete and recreate the index for elasticsearch"""
run("curl -s -XDELETE {uri}/{index}*".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
run("curl -s -XPUT {uri}/{index}".format(uri=settings.ELASTIC_URI,
index=settings.ELASTIC_INDEX))
migrate_search()
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task
def jshint():
"""Run JSHint syntax check"""
js_folder = os.path.join(HERE, 'website', 'static', 'js')
cmd = 'jshint {}'.format(js_folder)
run(cmd, echo=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
def pip_install(req_file):
"""Return the proper 'pip install' command for installing the dependencies
defined in ``req_file``.
"""
cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file))
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
return cmd
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""
Run unit tests: OSF (always), plus addons and syntax checks (optional)
"""
if syntax:
flake()
jshint()
test_osf()
if all:
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False):
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if not addons:
return
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
try:
requirements_file = os.path.join(path, 'requirements.txt')
open(requirements_file)
print('Installing requirements for {0}'.format(directory))
cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(bin_prefix(cmd))
except IOError:
pass
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
try:
open(os.path.join(path, 'local-travis.py'))
run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
try:
open(os.path.join(path, 'local-dist.py'))
run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task
def npm_bower():
print('Installing bower')
run('npm install -g bower', echo=True)
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
    print('Checking out master to calculate current version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
    except subprocess.CalledProcessError as err:
        # logger.warn("Error when running git describe")
        raise err
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
|
apache-2.0
|
timeartist/ufyr
|
ufyr/metadata.py
|
1
|
2664
|
from redis import StrictRedis
class MetadataBase(object):
def __init__(self, key_base=None, **kwargs):
self.r = StrictRedis(**kwargs)
        self.key_base = key_base or 'ufyr:%s'  # should probably be overridden by a subclass
self._get_meta_key = lambda x: self.key_base%x
self._error_key = self._get_meta_key('err:%s')
self._get_error_key = lambda x: self._error_key%x
self._lockout_key = self._get_meta_key('lk:%s')
self._get_lockout_key = lambda x: self._lockout_key%x
def set_metadata(self, key, dict_val):
self.r.hmset(self._get_meta_key(key), dict_val)
    def get_metadata(self, key):
return self.r.hgetall(self._get_meta_key(key))
def get_metadata_multi(self, ids):
assert isinstance(ids, (tuple, list, set))
pipe = self.r.pipeline()
for _id in ids:
pipe.hgetall(self._get_meta_key(_id))
return pipe.execute()
def delete_metadata(self, key):
return self.r.delete(self._get_meta_key(key))
def set_lockout(self, key, expire=7200):
key = self._get_lockout_key(key)
self.r.set(key, 1)
self.r.expire(key, expire)
def remove_lockout(self, key):
self.r.delete(self._get_lockout_key(key))
def is_locked_out(self, key):
return bool(self.r.keys(self._get_lockout_key(key)))
    def set_error(self, key, val=''):
        current_error = self.r.get(self._get_error_key(key)) or ''
        # append on a new line, avoiding a leading newline on the first error
        new_error = (current_error + '\n' + val) if current_error else val
        self.r.set(self._get_error_key(key), new_error)
def get_error(self, key):
return self.r.get(self._get_error_key(key)) or ''
def clear_error(self, key):
self.r.delete(self._get_error_key(key))
def monitor(self):
keys = self.r.keys(self._get_meta_key('*'))
ret = {}
for key in keys:
if ':err:' in key or ':lk:' in key:
continue
ret_key = ':'.join(key.split(':')[1:])
ret[ret_key] = self.r.hgetall(key)
return ret
    def monitor_lockout(self):
        keys = self.r.keys(self._get_lockout_key('*'))
        prefix = self._get_lockout_key('')
        ret = {}
        for key in keys:
            # str.lstrip strips a *set of characters*, not a prefix,
            # so slice the prefix off instead
            ret_key = key[len(prefix):]
            ret[ret_key] = self.r.ttl(key)
        return ret
    def monitor_errors(self):
        keys = self.r.keys(self._get_error_key('*'))
        prefix = self._get_error_key('')
        ret = {}
        for key in keys:
            ret_key = key[len(prefix):]
            ret[ret_key] = self.r.get(key)
        return ret
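# Minimal usage sketch; assumes a Redis server on localhost:6379 and a
# hypothetical key base 'jobs:%s'.
if __name__ == '__main__':
    meta = MetadataBase(key_base='jobs:%s')
    meta.set_metadata('job1', {'status': 'running', 'progress': '42'})
    print(meta.get_metadata('job1'))     # {'status': 'running', 'progress': '42'}
    meta.set_lockout('job1', expire=60)  # suppress reprocessing for 60 seconds
    print(meta.is_locked_out('job1'))    # True while the TTL lasts
    meta.set_error('job1', 'disk full')
    print(meta.get_error('job1'))
    meta.clear_error('job1')
    meta.delete_metadata('job1')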
|
unlicense
|
ghandiosm/Test
|
addons/mail/models/mail_template.py
|
13
|
26916
|
# -*- coding: utf-8 -*-
import base64
import datetime
import dateutil.relativedelta as relativedelta
import logging
import lxml
import urlparse
import openerp
from urllib import urlencode, quote as quote
from openerp import _, api, fields, models, SUPERUSER_ID
from openerp import tools
from openerp import report as odoo_report
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
def format_tz(pool, cr, uid, dt, tz=False, format=False, context=None):
context = dict(context or {})
if tz:
context['tz'] = tz or pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz'] or "UTC"
timestamp = datetime.datetime.strptime(dt, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ts = openerp.osv.fields.datetime.context_timestamp(cr, uid, timestamp, context)
if format:
return ts.strftime(format)
else:
lang = context.get("lang")
lang_params = {}
if lang:
res_lang = pool.get('res.lang')
ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
format_date = lang_params.get("date_format", '%B-%d-%Y')
format_time = lang_params.get("time_format", '%I-%M %p')
fdate = ts.strftime(format_date)
ftime = ts.strftime(format_time)
return "%s %s%s" % (fdate, ftime, (' (%s)' % tz) if tz else '')
try:
# We use a jinja2 sandboxed environment to render mako templates.
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents incidental or malicious execution of
# Python code that may break the security of the server.
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
# dateutil.relativedelta is an old-style class and cannot be directly
        # instantiated within a jinja2 expression, so a lambda "proxy" is
        # needed, apparently.
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
class MailTemplate(models.Model):
"Templates for sending email"
_name = "mail.template"
_description = 'Email Templates'
_order = 'name'
@api.model
def default_get(self, fields):
res = super(MailTemplate, self).default_get(fields)
if res.get('model'):
res['model_id'] = self.env['ir.model'].search([('model', '=', res.pop('model'))]).id
return res
name = fields.Char('Name')
    model_id = fields.Many2one('ir.model', 'Applies to', help="The kind of document with which this template can be used")
model = fields.Char('Related Document Model', related='model_id.model', select=True, store=True, readonly=True)
lang = fields.Char('Language',
help="Optional translation language (ISO code) to select when sending out an email. "
"If not set, the english version will be used. "
"This should usually be a placeholder expression "
"that provides the appropriate language, e.g. "
"${object.partner_id.lang}.",
placeholder="${object.partner_id.lang}")
user_signature = fields.Boolean('Add Signature',
help="If checked, the user's signature will be appended to the text version "
"of the message")
subject = fields.Char('Subject', translate=True, help="Subject (placeholders may be used here)")
email_from = fields.Char('From',
help="Sender address (placeholders may be used here). If not set, the default "
"value will be the author's email alias if configured, or email address.")
use_default_to = fields.Boolean(
'Default recipients',
help="Default recipients of the record:\n"
"- partner (using id on a partner or the partner_id field) OR\n"
"- email (using email_from or email field)")
email_to = fields.Char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)")
partner_to = fields.Char('To (Partners)', oldname='email_recipients',
help="Comma-separated ids of recipient partners (placeholders may be used here)")
email_cc = fields.Char('Cc', help="Carbon copy recipients (placeholders may be used here)")
reply_to = fields.Char('Reply-To', help="Preferred response address (placeholders may be used here)")
mail_server_id = fields.Many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
help="Optional preferred server for outgoing mails. If not set, the highest "
"priority one will be used.")
body_html = fields.Html('Body', translate=True, sanitize=False, help="Rich-text/HTML version of the message (placeholders may be used here)")
report_name = fields.Char('Report Filename', translate=True,
help="Name to use for the generated report file (may contain placeholders)\n"
"The extension can be omitted and will then come from the report type.")
report_template = fields.Many2one('ir.actions.report.xml', 'Optional report to print and attach')
ref_ir_act_window = fields.Many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,
help="Sidebar action to make this template available on records "
"of the related document model")
ref_ir_value = fields.Many2one('ir.values', 'Sidebar Button', readonly=True, copy=False,
help="Sidebar button to open the sidebar action")
attachment_ids = fields.Many2many('ir.attachment', 'email_template_attachment_rel', 'email_template_id',
'attachment_id', 'Attachments',
help="You may attach files to this template, to be added to all "
"emails created from this template")
auto_delete = fields.Boolean('Auto Delete', default=True, help="Permanently delete this email after sending it, to save space")
# Fake fields used to implement the placeholder assistant
model_object_field = fields.Many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship.")
sub_object = fields.Many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to.")
sub_model_object_field = fields.Many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model).")
null_value = fields.Char('Default Value', help="Optional value to use if the target field is empty")
copyvalue = fields.Char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field.")
@api.onchange('model_id')
def onchange_model_id(self):
        # TDE CLEANME: shouldn't it be a stored related field?
if self.model_id:
self.model = self.model_id.model
else:
self.model = False
def build_expression(self, field_name, sub_field_name, null_value):
"""Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:param null_value: default value if the target value is empty
:return: final placeholder expression """
expression = ''
if field_name:
expression = "${object." + field_name
if sub_field_name:
expression += "." + sub_field_name
if null_value:
expression += " or '''%s'''" % null_value
expression += "}"
return expression
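    # For illustration (hypothetical field names):
    #   build_expression('partner_id', 'name', 'N/A')
    # returns the placeholder string:
    #   ${object.partner_id.name or '''N/A'''}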
@api.onchange('model_object_field', 'sub_model_object_field', 'null_value')
def onchange_sub_model_object_value_field(self):
if self.model_object_field:
if self.model_object_field.ttype in ['many2one', 'one2many', 'many2many']:
models = self.env['ir.model'].search([('model', '=', self.model_object_field.relation)])
if models:
self.sub_object = models.id
self.copyvalue = self.build_expression(self.model_object_field.name, self.sub_model_object_field and self.sub_model_object_field.name or False, self.null_value or False)
else:
self.sub_object = False
self.sub_model_object_field = False
self.copyvalue = self.build_expression(self.model_object_field.name, False, self.null_value or False)
else:
self.sub_object = False
self.copyvalue = False
self.sub_model_object_field = False
self.null_value = False
@api.multi
def unlink(self):
self.unlink_action()
return super(MailTemplate, self).unlink()
@api.multi
def copy(self, default=None):
default = dict(default or {},
name=_("%s (copy)") % self.name)
return super(MailTemplate, self).copy(default=default)
@api.multi
def unlink_action(self):
for template in self:
if template.ref_ir_act_window:
template.ref_ir_act_window.sudo().unlink()
if template.ref_ir_value:
template.ref_ir_value.sudo().unlink()
return True
@api.multi
def create_action(self):
ActWindowSudo = self.env['ir.actions.act_window'].sudo()
IrValuesSudo = self.env['ir.values'].sudo()
view = self.env.ref('mail.email_compose_message_wizard_form')
for template in self:
src_obj = template.model_id.model
button_name = _('Send Mail (%s)') % template.name
action = ActWindowSudo.create({
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mail.compose.message',
'src_model': src_obj,
'view_type': 'form',
'context': "{'default_composition_mode': 'mass_mail', 'default_template_id' : %d, 'default_use_template': True}" % (template.id),
'view_mode': 'form,tree',
'view_id': view.id,
'target': 'new',
'auto_refresh': 1})
ir_value = IrValuesSudo.create({
'name': button_name,
'model': src_obj,
'key2': 'client_action_multi',
'value': "ir.actions.act_window,%s" % action.id})
template.write({
'ref_ir_act_window': action.id,
'ref_ir_value': ir_value.id,
})
return True
# ----------------------------------------
# RENDERING
# ----------------------------------------
@api.model
def _replace_local_links(self, html):
""" Post-processing of html content to replace local links to absolute
links, using web.base.url as base url. """
if not html:
return html
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
base_url = self.env['ir.config_parameter'].get_param('web.base.url')
(base_scheme, base_netloc, bpath, bparams, bquery, bfragment) = urlparse.urlparse(base_url)
def _process_link(url):
new_url = url
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if not scheme and not netloc:
new_url = urlparse.urlunparse((base_scheme, base_netloc, path, params, query, fragment))
return new_url
# check all nodes, replace :
# - img src -> check URL
# - a href -> check URL
for node in root.iter():
if node.tag == 'a' and node.get('href'):
node.set('href', _process_link(node.get('href')))
elif node.tag == 'img' and not node.get('src', 'data').startswith('data'):
node.set('src', _process_link(node.get('src')))
html = lxml.html.tostring(root, pretty_print=False, method='html')
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if html.startswith('<div>') and html.endswith('</div>'):
html = html[5:-6]
return html
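    # Illustrative effect (the base URL is hypothetical): with web.base.url
    # set to 'https://example.com', a relative link <a href="/web#id=3"> is
    # rewritten to <a href="https://example.com/web#id=3">; links that already
    # carry a scheme or netloc are left untouched.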
@api.model
def render_post_process(self, html):
html = self._replace_local_links(html)
return html
@api.model
def render_template(self, template_txt, model, res_ids, post_process=False):
""" Render the given template text, replace mako expressions ``${expr}``
with the result of evaluating these expressions with an evaluation
context containing:
- ``user``: browse_record of the current user
- ``object``: record of the document record this mail is related to
- ``context``: the context passed to the mail composition wizard
:param str template_txt: the template text to render
:param str model: model name of the document record this mail is related to.
:param int res_ids: list of ids of document records those mails are related to.
"""
multi_mode = True
if isinstance(res_ids, (int, long)):
multi_mode = False
res_ids = [res_ids]
results = dict.fromkeys(res_ids, u"")
# try to load the template
try:
template = mako_template_env.from_string(tools.ustr(template_txt))
except Exception:
_logger.info("Failed to load template %r", template_txt, exc_info=True)
return multi_mode and results or results[res_ids[0]]
# prepare template variables
records = self.env[model].browse(filter(None, res_ids)) # filter to avoid browsing [None]
res_to_rec = dict.fromkeys(res_ids, None)
for record in records:
res_to_rec[record.id] = record
variables = {
'format_tz': lambda dt, tz=False, format=False, context=self._context: format_tz(self.pool, self._cr, self._uid, dt, tz, format, context),
'user': self.env.user,
'ctx': self._context, # context kw would clash with mako internals
}
for res_id, record in res_to_rec.iteritems():
variables['object'] = record
try:
render_result = template.render(variables)
except Exception:
_logger.info("Failed to render template %r using values %r" % (template, variables), exc_info=True)
raise UserError(_("Failed to render template %r using values %r")% (template, variables))
render_result = u""
if render_result == u"False":
render_result = u""
results[res_id] = render_result
if post_process:
for res_id, result in results.iteritems():
results[res_id] = self.render_post_process(result)
return multi_mode and results or results[res_ids[0]]
@api.multi
def get_email_template(self, res_ids):
multi_mode = True
if isinstance(res_ids, (int, long)):
res_ids = [res_ids]
multi_mode = False
if res_ids is None:
res_ids = [None]
results = dict.fromkeys(res_ids, False)
if not self.ids:
return results
self.ensure_one()
langs = self.render_template(self.lang, self.model, res_ids)
for res_id, lang in langs.iteritems():
if lang:
template = self.with_context(lang=lang)
else:
template = self
results[res_id] = template
return multi_mode and results or results[res_ids[0]]
@api.multi
def generate_recipients(self, results, res_ids):
"""Generates the recipients of the template. Default values can ben generated
instead of the template values if requested by template or context.
Emails (email_to, email_cc) can be transformed into partners if requested
in the context. """
self.ensure_one()
if self.use_default_to or self._context.get('tpl_force_default_to'):
default_recipients = self.env['mail.thread'].message_get_default_recipients(res_model=self.model, res_ids=res_ids)
for res_id, recipients in default_recipients.iteritems():
results[res_id].pop('partner_to', None)
results[res_id].update(recipients)
for res_id, values in results.iteritems():
partner_ids = values.get('partner_ids', list())
if self._context.get('tpl_partners_only'):
mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
for mail in mails:
partner_id = self.env['res.partner'].find_or_create(mail)
partner_ids.append(partner_id)
partner_to = values.pop('partner_to', '')
if partner_to:
# placeholders could generate '', 3, 2 due to some empty field values
tpl_partner_ids = [int(pid) for pid in partner_to.split(',') if pid]
partner_ids += self.env['res.partner'].sudo().browse(tpl_partner_ids).exists().ids
results[res_id]['partner_ids'] = partner_ids
return results
@api.multi
def generate_email(self, res_ids, fields=None):
"""Generates an email from the template for given the given model based on
records given by res_ids.
:param template_id: id of the template to render.
:param res_id: id of the record to use for rendering the template (model
is taken from template definition)
:returns: a dict containing all relevant fields for creating a new
mail.mail entry, with one extra key ``attachments``, in the
format [(report_name, data)] where data is base64 encoded.
"""
self.ensure_one()
multi_mode = True
if isinstance(res_ids, (int, long)):
res_ids = [res_ids]
multi_mode = False
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to']
res_ids_to_templates = self.get_email_template_batch(res_ids)
# templates: res_id -> template; template -> res_ids
templates_to_res_ids = {}
for res_id, template in res_ids_to_templates.iteritems():
templates_to_res_ids.setdefault(template, []).append(res_id)
results = dict()
for template, template_res_ids in templates_to_res_ids.iteritems():
Template = self.env['mail.template']
# generate fields value for all res_ids linked to the current template
if template.lang:
Template = Template.with_context(lang=template._context.get('lang'))
for field in fields:
generated_field_values = Template.render_template(
getattr(template, field), template.model, template_res_ids,
post_process=(field == 'body_html'))
for res_id, field_value in generated_field_values.iteritems():
results.setdefault(res_id, dict())[field] = field_value
# compute recipients
if any(field in fields for field in ['email_to', 'partner_to', 'email_cc']):
results = template.generate_recipients(results, template_res_ids)
# update values for all res_ids
for res_id in template_res_ids:
values = results[res_id]
# body: add user signature, sanitize
if 'body_html' in fields and template.user_signature:
signature = self.env.user.signature
if signature:
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
if values.get('body_html'):
values['body'] = tools.html_sanitize(values['body_html'])
# technical settings
values.update(
mail_server_id=template.mail_server_id.id or False,
auto_delete=template.auto_delete,
model=template.model,
res_id=res_id or False,
attachment_ids=[attach.id for attach in template.attachment_ids],
)
# Add report in attachments: generate once for all template_res_ids
if template.report_template:
for res_id in template_res_ids:
attachments = []
report_name = self.render_template(template.report_name, template.model, res_id)
report = template.report_template
report_service = report.report_name
if report.report_type in ['qweb-html', 'qweb-pdf']:
result, format = self.pool['report'].get_pdf(self._cr, self._uid, [res_id], report_service, context=Template._context), 'pdf'
else:
result, format = odoo_report.render_report(self._cr, self._uid, [res_id], report_service, {'model': template.model}, Template._context)
# TODO in trunk, change return format to binary to match message_post expected format
result = base64.b64encode(result)
if not report_name:
report_name = 'report.' + report_service
ext = "." + format
if not report_name.endswith(ext):
report_name += ext
attachments.append((report_name, result))
results[res_id]['attachments'] = attachments
return multi_mode and results or results[res_ids[0]]
@api.multi
def send_mail(self, res_id, force_send=False, raise_exception=False):
"""Generates a new mail message for the given template and record,
and schedules it for delivery through the ``mail`` module's scheduler.
:param int res_id: id of the record to render the template with
(model is taken from the template)
:param bool force_send: if True, the generated mail.message is
immediately sent after being created, as if the scheduler
was executed for this message only.
:returns: id of the mail.message that was created
"""
self.ensure_one()
Mail = self.env['mail.mail']
        Attachment = self.env['ir.attachment']  # TDE FIXME: should remove default_type from context
# create a mail_mail based on values, without attachments
values = self.generate_email(res_id)
values['recipient_ids'] = [(4, pid) for pid in values.get('partner_ids', list())]
attachment_ids = values.pop('attachment_ids', [])
attachments = values.pop('attachments', [])
# add a protection against void email_from
if 'email_from' in values and not values.get('email_from'):
values.pop('email_from')
mail = Mail.create(values)
# manage attachments
for attachment in attachments:
attachment_data = {
'name': attachment[0],
'datas_fname': attachment[0],
'datas': attachment[1],
'res_model': 'mail.message',
'res_id': mail.mail_message_id.id,
}
attachment_ids.append(Attachment.create(attachment_data).id)
if attachment_ids:
values['attachment_ids'] = [(6, 0, attachment_ids)]
mail.write({'attachment_ids': [(6, 0, attachment_ids)]})
if force_send:
mail.send(raise_exception=raise_exception)
return mail.id # TDE CLEANME: return mail + api.returns ?
# compatibility
render_template_batch = render_template
get_email_template_batch = get_email_template
generate_email_batch = generate_email
# Compatibility method
# def render_template(self, cr, uid, template, model, res_id, context=None):
# return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]
# def get_email_template(self, cr, uid, template_id=False, record_id=None, context=None):
# return self.get_email_template_batch(cr, uid, template_id, [record_id], context)[record_id]
# def generate_email(self, cr, uid, template_id, res_id, context=None):
# return self.generate_email_batch(cr, uid, template_id, [res_id], context)[res_id]
|
gpl-3.0
|
itjp/w2p_js_separation
|
languages/sk.py
|
161
|
6877
|
# coding: utf8
{
'!langcode!': 'sk',
'!langname!': 'Slovenský',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je voliteľný výraz ako "field1=\'newvalue\'". Nemôžete upravovať alebo zmazať výsledky JOINu',
'%s %%{row} deleted': '%s zmazaných záznamov',
'%s %%{row} updated': '%s upravených záznamov',
'%s selected': '%s označených',
'%Y-%m-%d': '%d.%m.%Y',
'%Y-%m-%d %H:%M:%S': '%d.%m.%Y %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'pre administrátorské rozhranie kliknite sem',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'appadmin je zakázaný bez zabezpečeného spojenia',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Dostupné databázy a tabuľky',
'Buy this book': 'Buy this book',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nemôže byť prázdne',
'Check to delete': 'Označiť na zmazanie',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Aktuálna požiadavka',
'Current response': 'Aktuálna odpoveď',
'Current session': 'Aktuálne sedenie',
'customize me!': 'prispôsob ma!',
'data uploaded': 'údaje naplnené',
'Database': 'databáza',
'Database %s select': 'databáza %s výber',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Zmazať:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Popis',
'design': 'návrh',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Dokumentácia',
"Don't know what to do?": "Don't know what to do?",
'done!': 'hotovo!',
'Download': 'Download',
'Edit': 'Upraviť',
'Edit current record': 'Upraviť aktuálny záznam',
'Edit Profile': 'Upraviť profil',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportovať do csv súboru',
'FAQ': 'FAQ',
'First name': 'Krstné meno',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'ID skupiny',
'Groups': 'Groups',
'Hello World': 'Ahoj svet',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'vložiť nový záznam ',
'insert new %s': 'vložiť nový záznam %s',
'Internal State': 'Vnútorný stav',
'Introduction': 'Introduction',
'Invalid email': 'Neplatný email',
'Invalid password': 'Nesprávne heslo',
'Invalid Query': 'Neplatná otázka',
'invalid request': 'Neplatná požiadavka',
'Key': 'Key',
'Last name': 'Priezvisko',
'Layout': 'Layout',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'Logged in': 'Prihlásený',
'Logged out': 'Odhlásený',
'login': 'prihlásiť',
'logout': 'odhlásiť',
'Lost Password': 'Stratené heslo?',
'lost password?': 'stratené heslo?',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu Model',
'My Sites': 'My Sites',
'Name': 'Meno',
'New password': 'Nové heslo',
'New Record': 'Nový záznam',
'new record inserted': 'nový záznam bol vložený',
'next 100 rows': 'ďalších 100 riadkov',
'No databases in this application': 'V tejto aplikácii nie sú databázy',
'Old password': 'Staré heslo',
'Online examples': 'pre online príklady kliknite sem',
'or import from csv file': 'alebo naimportovať z csv súboru',
'Origin': 'Pôvod',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'password': 'heslo',
'Password': 'Heslo',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'predchádzajúcich 100 riadkov',
'Python': 'Python',
'Query:': 'Otázka:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'záznam',
'record does not exist': 'záznam neexistuje',
'Record ID': 'ID záznamu',
'Record id': 'id záznamu',
'Register': 'Zaregistrovať sa',
'register': 'registrovať',
'Registration key': 'Registračný kľúč',
'Remember me (for 30 days)': 'Zapamätaj si ma (na 30 dní)',
'Reset Password key': 'Nastaviť registračný kľúč',
'Role': 'Rola',
'Rows in Table': 'riadkov v tabuľke',
'Rows selected': 'označených riadkov',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'stav',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Submit': 'Odoslať',
'Support': 'Support',
'Sure you want to delete this object?': 'Ste si istí, že chcete zmazať tento objekt?',
'Table': 'tabuľka',
'Table name': 'Názov tabuľky',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query" je podmienka ako "db.table1.field1==\'value\'". Niečo ako "db.table1.field1==db.table2.field2" má za výsledok SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'Výstup zo súboru je slovník, ktorý bol zobrazený vo view %s',
'The Views': 'The Views',
'This App': 'This App',
'This is a copy of the scaffolding application': 'Toto je kópia skeletu aplikácie',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Časová pečiatka',
'Twitter': 'Twitter',
'unable to parse csv file': 'nedá sa načítať csv súbor',
'Update:': 'Upraviť:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Použite (...)&(...) pre AND, (...)|(...) pre OR a ~(...) pre NOT na poskladanie komplexnejších otázok.',
'User %(id)s Logged-in': 'Používateľ %(id)s prihlásený',
'User %(id)s Logged-out': 'Používateľ %(id)s odhlásený',
'User %(id)s Password changed': 'Používateľ %(id)s zmenil heslo',
'User %(id)s Profile updated': 'Používateľ %(id)s upravil profil',
'User %(id)s Registered': 'Používateľ %(id)s sa zaregistroval',
'User ID': 'ID používateľa',
'Verify Password': 'Zopakujte heslo',
'Videos': 'Videos',
'View': 'Zobraziť',
'Welcome to web2py': 'Vitajte vo web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Ktorý zavolal funkciu %s nachádzajúci sa v súbore %s',
'You are successfully running web2py': 'Úspešne ste spustili web2py',
'You can modify this application and adapt it to your needs': 'Môžete upraviť túto aplikáciu a prispôsobiť ju svojim potrebám',
'You visited the url %s': 'Navštívili ste URL %s',
}
|
lgpl-3.0
|
google-research/dice_rl
|
estimators/tabular_bayes_dice.py
|
1
|
10379
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.specs import tensor_spec
from tf_agents.policies import tf_policy
from typing import Any, Callable, Iterable, Optional, Sequence, Text, Tuple, Union
import dice_rl.data.dataset as dataset_lib
import dice_rl.utils.common as common_lib
import dice_rl.estimators.estimator as estimator_lib
class TabularBayesDice(object):
"""Robust policy evaluation."""
def __init__(self,
dataset_spec,
gamma: Union[float, tf.Tensor],
reward_fn: Optional[Callable] = None,
solve_for_state_action_ratio: bool = True,
nu_learning_rate: Union[float, tf.Tensor] = 0.1,
zeta_learning_rate: Union[float, tf.Tensor] = 0.1,
kl_regularizer: Union[float, tf.Tensor] = 1.,
eps_std: Union[float, tf.Tensor] = 1):
"""Initializes the solver.
Args:
dataset_spec: The spec of the dataset that will be given.
gamma: The discount factor to use.
reward_fn: A function that takes in an EnvStep and returns the reward for
that step. If not specified, defaults to just EnvStep.reward.
solve_for_state_action_ratio: Whether to solve for state-action density
ratio. Defaults to True. When solving an environment with a large
state/action space (taxi), better to set this to False to avoid OOM
issues.
nu_learning_rate: Learning rate for nu.
zeta_learning_rate: Learning rate for zeta.
kl_regularizer: Regularization constant for D_kl(q || p).
eps_std: epsilon standard deviation for sampling from the posterior.
"""
self._dataset_spec = dataset_spec
self._gamma = gamma
if reward_fn is None:
reward_fn = lambda env_step: env_step.reward
self._reward_fn = reward_fn
self._kl_regularizer = kl_regularizer
self._eps_std = eps_std
self._solve_for_state_action_ratio = solve_for_state_action_ratio
if (not self._solve_for_state_action_ratio and
not self._dataset_spec.has_log_probability()):
raise ValueError('Dataset must contain log-probability when '
'solve_for_state_action_ratio is False.')
# Get number of states/actions.
observation_spec = self._dataset_spec.observation
action_spec = self._dataset_spec.action
if not common_lib.is_categorical_spec(observation_spec):
raise ValueError('Observation spec must be discrete and bounded.')
self._num_states = observation_spec.maximum + 1
if not common_lib.is_categorical_spec(action_spec):
raise ValueError('Action spec must be discrete and bounded.')
self._num_actions = action_spec.maximum + 1
self._dimension = (
self._num_states * self._num_actions
if self._solve_for_state_action_ratio else self._num_states)
self._td_residuals = np.zeros([self._dimension, self._dimension])
self._total_weights = np.zeros([self._dimension])
self._initial_weights = np.zeros([self._dimension])
self._nu_optimizer = tf.keras.optimizers.Adam(nu_learning_rate)
self._zeta_optimizer = tf.keras.optimizers.Adam(zeta_learning_rate)
# Initialize variational Bayes parameters
self._nu_mu = tf.Variable(tf.zeros([self._dimension]))
self._nu_log_sigma = tf.Variable(tf.zeros([self._dimension]))
self._prior_mu = tf.Variable(tf.zeros([self._dimension]), trainable=True)
self._prior_log_sigma = tf.Variable(
tf.zeros([self._dimension]), trainable=False)
def _get_index(self, state, action):
if self._solve_for_state_action_ratio:
return state * self._num_actions + action
else:
return state
def prepare_dataset(self, dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy):
episodes, valid_steps = dataset.get_all_episodes()
tfagents_episodes = dataset_lib.convert_to_tfagents_timestep(episodes)
for episode_num in range(tf.shape(valid_steps)[0]):
      # Precompute probabilities for this episode.
this_episode = tf.nest.map_structure(lambda t: t[episode_num], episodes)
first_step = tf.nest.map_structure(lambda t: t[0], this_episode)
this_tfagents_episode = dataset_lib.convert_to_tfagents_timestep(
this_episode)
episode_target_log_probabilities = target_policy.distribution(
this_tfagents_episode).action.log_prob(this_episode.action)
episode_target_probs = target_policy.distribution(
this_tfagents_episode).action.probs_parameter()
for step_num in range(tf.shape(valid_steps)[1] - 1):
this_step = tf.nest.map_structure(lambda t: t[episode_num, step_num],
episodes)
next_step = tf.nest.map_structure(
lambda t: t[episode_num, step_num + 1], episodes)
if this_step.is_last() or not valid_steps[episode_num, step_num]:
continue
weight = 1.0
nu_index = self._get_index(this_step.observation, this_step.action)
self._td_residuals[nu_index, nu_index] += -weight
self._total_weights[nu_index] += weight
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
policy_ratio = tf.exp(episode_target_log_probabilities[step_num] -
this_step.get_log_probability())
# Need to weight next nu by importance weight.
next_weight = (
weight if self._solve_for_state_action_ratio else policy_ratio *
weight)
next_probs = episode_target_probs[step_num + 1]
for next_action, next_prob in enumerate(next_probs):
next_nu_index = self._get_index(next_step.observation, next_action)
self._td_residuals[next_nu_index, nu_index] += (
next_prob * self._gamma * next_weight)
initial_probs = episode_target_probs[0]
for initial_action, initial_prob in enumerate(initial_probs):
initial_nu_index = self._get_index(first_step.observation,
initial_action)
self._initial_weights[initial_nu_index] += weight * initial_prob
self._initial_weights = tf.cast(self._initial_weights, tf.float32)
self._total_weights = tf.cast(self._total_weights, tf.float32)
self._td_residuals = self._td_residuals / np.sqrt(
1e-8 + self._total_weights)[None, :]
self._td_errors = tf.cast(
np.dot(self._td_residuals, self._td_residuals.T), tf.float32)
self._td_residuals = tf.cast(self._td_residuals, tf.float32)
@tf.function
def train_step(self, regularizer: float = 1e-6):
# Solve primal form min (1-g) * E[nu0] + E[(B nu - nu)^2].
with tf.GradientTape() as tape:
nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
eps = tf.random.normal(tf.shape(nu_sigma), 0, self._eps_std)
nu = self._nu_mu + nu_sigma * eps
init_nu_loss = tf.einsum('m,m', (1 - self._gamma) * self._initial_weights,
nu)
residuals = tf.einsum('n,nm->m', nu, self._td_residuals)
bellman_loss = 0.5 * tf.einsum('m,m', residuals, residuals)
prior_sigma = tf.sqrt(tf.exp(self._prior_log_sigma))
      prior_var = tf.square(prior_sigma)
      prior_var = 1.  # NOTE: fixes the prior variance at 1, overriding the learned value above
neg_kl = (0.5 * (1. - 2. * tf.math.log(prior_sigma / nu_sigma + 1e-8) -
(self._nu_mu - self._prior_mu)**2 / prior_var -
nu_sigma**2 / prior_var))
loss = init_nu_loss + bellman_loss - self._kl_regularizer * neg_kl
grads = tape.gradient(loss, [
self._nu_mu, self._nu_log_sigma, self._prior_mu, self._prior_log_sigma
])
self._nu_optimizer.apply_gradients(
zip(grads, [
self._nu_mu, self._nu_log_sigma, self._prior_mu,
self._prior_log_sigma
]))
return loss
def estimate_average_reward(self,
dataset: dataset_lib.OffpolicyDataset,
target_policy: tf_policy.TFPolicy,
num_samples=100):
"""Estimates value (average per-step reward) of policy.
    The estimation is based on solved values of zeta, so one should run
    train_step() to convergence before calling this function.
Args:
dataset: The dataset to sample experience from.
target_policy: The policy whose value we want to estimate.
num_samples: number of posterior samples.
Returns:
A tensor with num_samples samples of estimated average per-step reward
of the target policy.
"""
nu_sigma = tf.sqrt(tf.exp(self._nu_log_sigma))
eps = tf.random.normal(
tf.concat([[num_samples], tf.shape(nu_sigma)], axis=-1), 0,
self._eps_std)
nu = self._nu_mu + nu_sigma * eps
self._zeta = (
tf.einsum('bn,nm->bm', nu, self._td_residuals) /
tf.math.sqrt(1e-8 + self._total_weights))
def weight_fn(env_step):
index = self._get_index(env_step.observation, env_step.action)
zeta = tf.gather(
self._zeta, tf.tile(index[None, :], [num_samples, 1]), batch_dims=1)
policy_ratio = 1.0
if not self._solve_for_state_action_ratio:
tfagents_timestep = dataset_lib.convert_to_tfagents_timestep(env_step)
target_log_probabilities = target_policy.distribution(
tfagents_timestep).action.log_prob(env_step.action)
policy_ratio = tf.exp(target_log_probabilities -
env_step.get_log_probability())
return tf.cast(zeta * policy_ratio, tf.float32)
return estimator_lib.get_fullbatch_average(
dataset,
limit=None,
by_steps=True,
reward_fn=self._reward_fn,
weight_fn=weight_fn)
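# Sketch of the intended call sequence (dataset and policy construction is
# environment-specific and omitted; `num_steps` and the gamma value are
# illustrative):
#
#   estimator = TabularBayesDice(dataset.spec, gamma=0.99)
#   estimator.prepare_dataset(dataset, target_policy)
#   for _ in range(num_steps):
#       estimator.train_step()
#   reward_samples = estimator.estimate_average_reward(dataset, target_policy)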
|
apache-2.0
|
Diegojnb/JdeRobot
|
src/drivers/MAVLinkServer/MAVProxy/modules/lib/mavmemlog.py
|
8
|
3580
|
'''in-memory mavlink log'''
from pymavlink import mavutil
class mavmemlog(mavutil.mavfile):
'''a MAVLink log in memory. This allows loading a log into
memory to make it easier to do multiple sweeps over a log'''
def __init__(self, mav, progress_callback=None):
mavutil.mavfile.__init__(self, None, 'memlog')
self._msgs = []
self._count = 0
self.rewind()
self._flightmodes = []
last_flightmode = None
last_timestamp = None
last_pct = 0
while True:
m = mav.recv_msg()
if m is None:
break
if int(mav.percent) != last_pct and progress_callback:
progress_callback(int(mav.percent))
last_pct = int(mav.percent)
self._msgs.append(m)
if mav.flightmode != last_flightmode:
if len(self._flightmodes) > 0:
(mode, t1, t2) = self._flightmodes[-1]
self._flightmodes[-1] = (mode, t1, m._timestamp)
self._flightmodes.append((mav.flightmode, m._timestamp, None))
last_flightmode = mav.flightmode
self._count += 1
last_timestamp = m._timestamp
self.check_param(m)
if last_timestamp is not None and len(self._flightmodes) > 0:
(mode, t1, t2) = self._flightmodes[-1]
self._flightmodes[-1] = (mode, t1, last_timestamp)
def recv_msg(self):
'''message receive routine'''
if self._index >= self._count:
return None
m = self._msgs[self._index]
type = m.get_type()
self._index += 1
self.percent = (100.0 * self._index) / self._count
self.messages[type] = m
self._timestamp = m._timestamp
if self._flightmode_index < len(self._flightmodes):
(mode, tstamp, t2) = self._flightmodes[self._flightmode_index]
if m._timestamp >= tstamp:
self.flightmode = mode
self._flightmode_index += 1
self.check_param(m)
return m
def check_param(self, m):
type = m.get_type()
if type == 'PARAM_VALUE':
            s = str(m.param_id)
            self.params[s] = m.param_value
elif type == 'PARM' and getattr(m, 'Name', None) is not None:
self.params[m.Name] = m.Value
def rewind(self):
'''rewind to start'''
self._index = 0
self.percent = 0
self.messages = {}
self._flightmode_index = 0
self._timestamp = None
self.flightmode = None
self.params = {}
def flightmode_list(self):
'''return list of all flightmodes as tuple of mode and start time'''
return self._flightmodes
def reduce_by_flightmodes(self, flightmode_selections):
'''reduce data using flightmode selections'''
if len(flightmode_selections) == 0:
return
all_false = True
for s in flightmode_selections:
if s:
all_false = False
if all_false:
            # treat all false as all modes wanted
return
new_msgs = []
idx = 0
for m in self._msgs:
while idx < len(self._flightmodes) and m._timestamp >= self._flightmodes[idx][2]:
idx += 1
if idx < len(flightmode_selections) and flightmode_selections[idx]:
new_msgs.append(m)
self._msgs = new_msgs
self._count = len(new_msgs)
self.rewind()
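# Typical use (the log filename is illustrative): load a telemetry log fully
# into memory, sweep it, then rewind for another pass.
#
#   mlog = mavmemlog(mavutil.mavlink_connection('flight.tlog'))
#   while mlog.recv_msg() is not None:
#       pass                       # first sweep
#   mlog.rewind()                  # ready for the next sweep
#   print(mlog.flightmode_list())  # (mode, start_time, end_time) tuples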
|
gpl-3.0
|
Yukarumya/Yukarum-Redfoxes
|
media/webrtc/trunk/tools/gyp/test/win/gyptest-link-enable-winrt-app-revision.py
|
41
|
1180
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_application_type_revision works correctly.
"""
import TestGyp
import os
import sys
import struct
CHDIR = 'winrt-app-type-revision'
print 'This test is not currently working on the bots: https://code.google.com/p/gyp/issues/detail?id=466'
sys.exit(0)
if (sys.platform == 'win32' and
int(os.environ.get('GYP_MSVS_VERSION', 0)) == 2013):
test = TestGyp.TestGyp(formats=['msvs'])
test.run_gyp('winrt-app-type-revision.gyp', chdir=CHDIR)
test.build('winrt-app-type-revision.gyp', 'enable_winrt_81_revision_dll',
chdir=CHDIR)
# Revision is set to 8.2 which is invalid for 2013 projects so compilation
# must fail.
test.build('winrt-app-type-revision.gyp', 'enable_winrt_82_revision_dll',
chdir=CHDIR, status=1)
# Revision is set to an invalid value for 2013 projects so compilation
# must fail.
test.build('winrt-app-type-revision.gyp', 'enable_winrt_invalid_revision_dll',
chdir=CHDIR, status=1)
test.pass_test()
|
mpl-2.0
|
joram/sickbeard-orange
|
lib/hachoir_parser/network/tcpdump.py
|
90
|
16398
|
"""
Tcpdump parser
Source:
* libpcap source code (file savefile.c)
* RFC 791 (IPv4)
* RFC 792 (ICMP)
* RFC 793 (TCP)
* RFC 1122 (Requirements for Internet Hosts)
Author: Victor Stinner
Creation: 23 march 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Enum, Bytes, NullBytes, RawBytes,
UInt8, UInt16, UInt32, Int32, TimestampUnix32,
Bit, Bits, NullBits)
from lib.hachoir_core.endian import NETWORK_ENDIAN, LITTLE_ENDIAN
from lib.hachoir_core.tools import humanDuration
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.tools import createDict
from lib.hachoir_parser.network.common import MAC48_Address, IPv4_Address, IPv6_Address
def diff(field):
return humanDuration(field.value*1000)
class Layer(FieldSet):
endian = NETWORK_ENDIAN
def parseNext(self, parent):
return None
class ARP(Layer):
opcode_name = {
1: "request",
2: "reply"
}
endian = NETWORK_ENDIAN
def createFields(self):
yield UInt16(self, "hw_type")
yield UInt16(self, "proto_type")
yield UInt8(self, "hw_size")
yield UInt8(self, "proto_size")
yield Enum(UInt16(self, "opcode"), ARP.opcode_name)
yield MAC48_Address(self, "src_mac")
yield IPv4_Address(self, "src_ip")
yield MAC48_Address(self, "dst_mac")
yield IPv4_Address(self, "dst_ip")
def createDescription(self):
desc = "ARP: %s" % self["opcode"].display
opcode = self["opcode"].value
src_ip = self["src_ip"].display
dst_ip = self["dst_ip"].display
if opcode == 1:
desc += ", %s ask %s" % (dst_ip, src_ip)
elif opcode == 2:
desc += " from %s" % src_ip
return desc
class TCP_Option(FieldSet):
NOP = 1
MAX_SEGMENT = 2
WINDOW_SCALE = 3
SACK = 4
TIMESTAMP = 8
code_name = {
NOP: "NOP",
MAX_SEGMENT: "Max segment size",
WINDOW_SCALE: "Window scale",
SACK: "SACK permitted",
TIMESTAMP: "Timestamp"
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
if self["code"].value != self.NOP:
self._size = self["length"].value * 8
else:
self._size = 8
def createFields(self):
yield Enum(UInt8(self, "code", "Code"), self.code_name)
code = self["code"].value
if code == self.NOP:
return
yield UInt8(self, "length", "Option size in bytes")
if code == self.MAX_SEGMENT:
yield UInt16(self, "max_seg", "Maximum segment size")
elif code == self.WINDOW_SCALE:
yield UInt8(self, "win_scale", "Window scale")
elif code == self.TIMESTAMP:
yield UInt32(self, "ts_val", "Timestamp value")
yield UInt32(self, "ts_ecr", "Timestamp echo reply")
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
def createDescription(self):
return "TCP option: %s" % self["code"].display
class TCP(Layer):
port_name = {
13: "daytime",
20: "ftp data",
21: "ftp",
23: "telnet",
25: "smtp",
53: "dns",
63: "dhcp/bootp",
80: "HTTP",
110: "pop3",
119: "nntp",
123: "ntp",
139: "netbios session service",
1863: "MSNMS",
6667: "IRC"
}
def createFields(self):
yield Enum(UInt16(self, "src"), self.port_name)
yield Enum(UInt16(self, "dst"), self.port_name)
yield UInt32(self, "seq_num")
yield UInt32(self, "ack_num")
yield Bits(self, "hdrlen", 6, "Header lenght")
yield NullBits(self, "reserved", 2, "Reserved")
yield Bit(self, "cgst", "Congestion Window Reduced")
yield Bit(self, "ecn-echo", "ECN-echo")
yield Bit(self, "urg", "Urgent")
yield Bit(self, "ack", "Acknowledge")
yield Bit(self, "psh", "Push mmode")
yield Bit(self, "rst", "Reset connection")
yield Bit(self, "syn", "Synchronize")
yield Bit(self, "fin", "Stop the connection")
yield UInt16(self, "winsize", "Windows size")
yield textHandler(UInt16(self, "checksum"), hexadecimal)
yield UInt16(self, "urgent")
size = self["hdrlen"].value*8 - self.current_size
while 0 < size:
option = TCP_Option(self, "option[]")
yield option
size -= option.size
def parseNext(self, parent):
return None
def createDescription(self):
src = self["src"].value
dst = self["dst"].value
if src < 32768:
src = self["src"].display
else:
src = None
if dst < 32768:
dst = self["dst"].display
else:
dst = None
desc = "TCP"
        if src is not None and dst is not None:
            desc += " (%s->%s)" % (src, dst)
        elif src is not None:
            desc += " (%s->)" % (src)
        elif dst is not None:
            desc += " (->%s)" % (dst)
# Get flags
flags = []
if self["syn"].value:
flags.append("SYN")
if self["ack"].value:
flags.append("ACK")
if self["fin"].value:
flags.append("FIN")
if self["rst"].value:
flags.append("RST")
if flags:
desc += " [%s]" % (",".join(flags))
return desc
class UDP(Layer):
port_name = {
12: "daytime",
22: "ssh",
53: "DNS",
67: "dhcp/bootp",
80: "http",
110: "pop3",
123: "ntp",
137: "netbios name service",
138: "netbios datagram service"
}
def createFields(self):
yield Enum(UInt16(self, "src"), UDP.port_name)
yield Enum(UInt16(self, "dst"), UDP.port_name)
yield UInt16(self, "length")
yield textHandler(UInt16(self, "checksum"), hexadecimal)
def createDescription(self):
return "UDP (%s->%s)" % (self["src"].display, self["dst"].display)
class ICMP(Layer):
REJECT = 3
PONG = 0
PING = 8
type_desc = {
PONG: "Pong",
REJECT: "Reject",
PING: "Ping"
}
reject_reason = {
0: "net unreachable",
1: "host unreachable",
2: "protocol unreachable",
3: "port unreachable",
4: "fragmentation needed and DF set",
5: "source route failed",
6: "Destination network unknown error",
7: "Destination host unknown error",
8: "Source host isolated error",
9: "Destination network administratively prohibited",
10: "Destination host administratively prohibited",
11: "Unreachable network for Type Of Service",
12: "Unreachable host for Type Of Service.",
13: "Communication administratively prohibited",
14: "Host precedence violation",
15: "Precedence cutoff in effect"
}
def createFields(self):
# Type
yield Enum(UInt8(self, "type"), self.type_desc)
type = self["type"].value
# Code
field = UInt8(self, "code")
if type == 3:
field = Enum(field, self.reject_reason)
yield field
# Options
yield textHandler(UInt16(self, "checksum"), hexadecimal)
if type in (self.PING, self.PONG): # and self["code"].value == 0:
yield UInt16(self, "id")
yield UInt16(self, "seq_num")
# follow: ping data
elif type == self.REJECT:
yield NullBytes(self, "empty", 2)
yield UInt16(self, "hop_mtu", "Next-Hop MTU")
def createDescription(self):
type = self["type"].value
if type in (self.PING, self.PONG):
return "%s (num=%s)" % (self["type"].display, self["seq_num"].value)
else:
return "ICMP (%s)" % self["type"].display
def parseNext(self, parent):
if self["type"].value == self.REJECT:
return IPv4(parent, "rejected_ipv4")
else:
return None
class ICMPv6(Layer):
ECHO_REQUEST = 128
ECHO_REPLY = 129
TYPE_DESC = {
128: "Echo request",
129: "Echo reply",
}
def createFields(self):
yield Enum(UInt8(self, "type"), self.TYPE_DESC)
yield UInt8(self, "code")
yield textHandler(UInt16(self, "checksum"), hexadecimal)
if self['type'].value in (self.ECHO_REQUEST, self.ECHO_REPLY):
yield UInt16(self, "id")
yield UInt16(self, "sequence")
def createDescription(self):
if self['type'].value in (self.ECHO_REQUEST, self.ECHO_REPLY):
return "%s (num=%s)" % (self["type"].display, self["sequence"].value)
else:
return "ICMPv6 (%s)" % self["type"].display
class IP(Layer):
PROTOCOL_INFO = {
1: ("icmp", ICMP, "ICMP"),
6: ("tcp", TCP, "TCP"),
17: ("udp", UDP, "UDP"),
58: ("icmpv6", ICMPv6, "ICMPv6"),
60: ("ipv6_opts", None, "IPv6 destination option"),
}
PROTOCOL_NAME = createDict(PROTOCOL_INFO, 2)
def parseNext(self, parent):
proto = self["protocol"].value
if proto not in self.PROTOCOL_INFO:
return None
name, parser, desc = self.PROTOCOL_INFO[proto]
if not parser:
return None
return parser(parent, name)
class IPv4(IP):
precedence_name = {
7: "Network Control",
6: "Internetwork Control",
5: "CRITIC/ECP",
4: "Flash Override",
3: "Flash",
2: "Immediate",
1: "Priority",
0: "Routine",
}
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["hdr_size"].value * 32
def createFields(self):
yield Bits(self, "version", 4, "Version")
yield Bits(self, "hdr_size", 4, "Header size divided by 5")
# Type of service
yield Enum(Bits(self, "precedence", 3, "Precedence"), self.precedence_name)
yield Bit(self, "low_delay", "If set, low delay, else normal delay")
yield Bit(self, "high_throu", "If set, high throughput, else normal throughput")
yield Bit(self, "high_rel", "If set, high relibility, else normal")
yield NullBits(self, "reserved[]", 2, "(reserved for future use)")
yield UInt16(self, "length")
yield UInt16(self, "id")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "df", "Don't fragment")
yield Bit(self, "more_frag", "There are more fragments? if not set, it's the last one")
yield Bits(self, "frag_ofst_lo", 5)
yield UInt8(self, "frag_ofst_hi")
yield UInt8(self, "ttl", "Type to live")
yield Enum(UInt8(self, "protocol"), self.PROTOCOL_NAME)
yield textHandler(UInt16(self, "checksum"), hexadecimal)
yield IPv4_Address(self, "src")
yield IPv4_Address(self, "dst")
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "options", size)
def createDescription(self):
return "IPv4 (%s>%s)" % (self["src"].display, self["dst"].display)
class IPv6(IP):
static_size = 40 * 8
endian = NETWORK_ENDIAN
def createFields(self):
yield Bits(self, "version", 4, "Version (6)")
yield Bits(self, "traffic", 8, "Traffic class")
yield Bits(self, "flow", 20, "Flow label")
yield Bits(self, "length", 16, "Payload length")
yield Enum(Bits(self, "protocol", 8, "Next header"), self.PROTOCOL_NAME)
yield Bits(self, "hop_limit", 8, "Hop limit")
yield IPv6_Address(self, "src")
yield IPv6_Address(self, "dst")
def createDescription(self):
return "IPv6 (%s>%s)" % (self["src"].display, self["dst"].display)
class Layer2(Layer):
PROTO_INFO = {
0x0800: ("ipv4", IPv4, "IPv4"),
0x0806: ("arp", ARP, "ARP"),
0x86dd: ("ipv6", IPv6, "IPv6"),
}
PROTO_DESC = createDict(PROTO_INFO, 2)
def parseNext(self, parent):
try:
name, parser, desc = self.PROTO_INFO[ self["protocol"].value ]
return parser(parent, name)
except KeyError:
return None
class Unicast(Layer2):
packet_type_name = {
0: "Unicast to us"
}
def createFields(self):
yield Enum(UInt16(self, "packet_type"), self.packet_type_name)
yield UInt16(self, "addr_type", "Link-layer address type")
yield UInt16(self, "addr_length", "Link-layer address length")
length = self["addr_length"].value
length = 8 # FIXME: Should we use addr_length or not?
if length:
yield RawBytes(self, "source", length)
yield Enum(UInt16(self, "protocol"), self.PROTO_DESC)
class Ethernet(Layer2):
static_size = 14*8
def createFields(self):
yield MAC48_Address(self, "dst")
yield MAC48_Address(self, "src")
yield Enum(UInt16(self, "protocol"), self.PROTO_DESC)
def createDescription(self):
return "Ethernet: %s>%s (%s)" % \
(self["src"].display, self["dst"].display, self["protocol"].display)
class Packet(FieldSet):
endian = LITTLE_ENDIAN
def __init__(self, parent, name, parser, first_name):
FieldSet.__init__(self, parent, name)
self._size = (16 + self["caplen"].value) * 8
self._first_parser = parser
self._first_name = first_name
def createFields(self):
yield TimestampUnix32(self, "ts_epoch", "Timestamp (Epoch)")
yield UInt32(self, "ts_nanosec", "Timestamp (nano second)")
yield UInt32(self, "caplen", "length of portion present")
yield UInt32(self, "len", "length this packet (off wire)")
# Read different layers
field = self._first_parser(self, self._first_name)
while field:
yield field
field = field.parseNext(self)
# Read data if any
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "data", size)
def getTimestamp(self):
from datetime import timedelta
# Convert the nanosecond field to microseconds for timedelta
# (assumes the stored value really is nanoseconds, as the field declares).
nano_sec = float(self["ts_nanosec"].value) / 1000
return self["ts_epoch"].value + timedelta(microseconds=nano_sec)
def createDescription(self):
t0 = self["/packet[0]"].getTimestamp()
# ts = max(self.getTimestamp() - t0, t0)
ts = self.getTimestamp() - t0
#text = ["%1.6f: " % ts]
text = ["%s: " % ts]
if "icmp" in self:
text.append(self["icmp"].description)
elif "tcp" in self:
text.append(self["tcp"].description)
elif "udp" in self:
text.append(self["udp"].description)
elif "arp" in self:
text.append(self["arp"].description)
else:
text.append("Packet")
return "".join(text)
class TcpdumpFile(Parser):
PARSER_TAGS = {
"id": "tcpdump",
"category": "misc",
"min_size": 24*8,
"description": "Tcpdump file (network)",
"magic": (("\xd4\xc3\xb2\xa1", 0),),
}
endian = LITTLE_ENDIAN
LINK_TYPE = {
1: ("ethernet", Ethernet),
113: ("unicast", Unicast),
}
LINK_TYPE_DESC = createDict(LINK_TYPE, 0)
def validate(self):
if self["id"].value != "\xd4\xc3\xb2\xa1":
return "Wrong file signature"
if self["link_type"].value not in self.LINK_TYPE:
return "Unknown link type"
return True
def createFields(self):
yield Bytes(self, "id", 4, "Tcpdump identifier")
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield Int32(self, "this_zone", "GMT to local time zone correction")
yield Int32(self, "sigfigs", "accuracy of timestamps")
yield UInt32(self, "snap_len", "max length saved portion of each pkt")
yield Enum(UInt32(self, "link_type", "data link type"), self.LINK_TYPE_DESC)
link = self["link_type"].value
if link not in self.LINK_TYPE:
raise ParserError("Unknown link type: %s" % link)
name, parser = self.LINK_TYPE[link]
while self.current_size < self.size:
yield Packet(self, "packet[]", parser, name)
|
gpl-3.0
|
TiagoBras/svg2code
|
svg2code/svg_parser.py
|
1
|
16075
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import re
import xml.etree.ElementTree as ElementTree
from svg2code.svg_colors import SVG_COLORS
from svg2code.helpers import parseSVGNumber as parseNumber
from os import path
class RGBAColor(object):
"""rgba color format: [0.0, 1.0]"""
def __init__(self, r=0.0, g=0.0, b=0.0, a=0.0):
super(RGBAColor, self).__init__()
self.r = float(r)
self.g = float(g)
self.b = float(b)
self.a = float(a)
@property
def components(self):
return [self.r, self.g, self.b, self.a]
def __eq__(self, o):
return self.r == o.r and self.g == o.g and self.b == o.b and self.a == o.a
def __ne__(self, o):
return not (self == o)
class SVGNode(object):
def __init__(self, xml, parent=None, isDrawable=False):
super(SVGNode, self).__init__()
self.parent = parent
self.depth = 0 if parent is None else parent.depth + 1
self.id = self._genId(xml, parent)
self.transform = self._parseTransform(xml)
self.isDrawable = isDrawable
# print(("-"*self.depth) + re.sub(r'\{[^\}]+\}', '', xml.tag) + " " + self.id or self.parent.id)
self.style = parent.style.copy() if parent is not None else {
"fill": "black"
}
self.style.update(self._parseStyle(xml))
if parent is not None:
self.transform = parent.transform * self.transform
self.children = self._parseChildren(xml, self)
def _genId(self, xml, parent):
newId = xml.attrib.get("id")
if newId is not None:
return newId
if parent is None:
return ""
newId = parent.id
m = re.search(r'(?P<n>\d+)$', newId)
if m is not None:
return re.sub(r'\d+$', str(int(m.group('n')) + 1), newId)
else:
return newId + '1' if parent.isDrawable else newId
def _parseTransform(self, xml):
if "transform" in xml.attrib:
if not xml.attrib["transform"].startswith("matrix"):
raise TypeError("%s does not start with matrix" % xml.attrib["transform"])
f = [float(x) for x in xml.attrib["transform"][7:-1].split(",")]
return M.withComponents(*f)
else:
return M()
def _parseStyle(self, xml):
if "style" in xml.attrib:
styleArray = [x for x in xml.attrib["style"].split(';') if len(x) > 0]
style = {}
for el in styleArray:
key, value = el.split(':')
style[key] = value
return style
else:
return {}
def _parseChildren(self, xml, parent):
children = []
for child in xml:
tag = re.sub(r'\{[^\}]+\}', '', child.tag)
if tag == "g":
children.append(SVGGroup(child, parent))
else:
children.append(SVGPath(child, parent))
return children
def _getColorForKey(self, colorKey):
color = self.style.get(colorKey)
if color is None:
return None
color = SVG_COLORS.get(color) or color
if color == 'inherit':
return None
elif color == "none":
return RGBAColor(0.0, 0.0, 0.0, 0.0)
elif color.startswith("rgb("):
components = [int(x) / 255.0 for x in color[4:-1].split(",")]
components.append(self.style.get(colorKey + "_opacity", 1.0))
return RGBAColor(*components)
raise NotImplementedError("%s color format is not implemented" % color)
@property
def isVisible(self):
fillColor = self.rgbaFillColor or RGBAColor(0.0, 0.0, 0.0, 0.0)
strokeColor = self.rgbaStrokeColor or RGBAColor(0.0, 0.0, 0.0, 0.0)
return fillColor.a > 0.0 or strokeColor.a > 0.0
@property
def rgbaFillColor(self):
return self._getColorForKey("fill")
@property
def rgbaStrokeColor(self):
return self._getColorForKey("stroke")
@property
def strokeWidth(self):
width = self.style.get("stroke-width")
if width is None or width == "inherit":
return None
return parseNumber(width)
@property
def strokeLineCap(self):
cap = self.style.get("stroke-linecap")
return None if cap is None or cap == "inherit" else cap
@property
def strokeMiterLimit(self):
miter = self.style.get("stroke-miterlimit")
return None if miter is None or miter == "inherit" else miter
@property
def usesEvenOddFillRule(self):
return self.style.get("fill-rule", "nonzero") == "evenodd"
class SVG(SVGNode):
def __init__(self, xml, name):
super(SVG, self).__init__(xml)
self.x, self.y, self.width, self.height = self._parseViewBox(xml)
width = xml.attrib.get("width", None)
if width is not None:
if width.endswith("%"):
self.width = parseNumber(width) * self.width / 100.0
else:
self.width = parseNumber(width)
height = xml.attrib.get("height", None)
if height is not None:
if height.endswith("%"):
self.height = parseNumber(height) * self.height / 100.0
else:
self.height = parseNumber(height)
self.name = name
@classmethod
def fromFile(cls, filepath):
tree = ElementTree.parse(filepath)
return cls(tree.getroot(), path.splitext(path.basename(filepath))[0])
@classmethod
def fromString(cls, string, name):
root = ElementTree.fromstring(string)
return cls(root, name)
@property
def iterator(self):
"""Depth-First iterator"""
stack = [self]
while stack:
node = stack.pop()
yield node
for child in reversed(node.children):
stack.append(child)
@property
def paths(self):
for node in self.iterator:
if isinstance(node, SVGPath) and node.isDrawable and node.isVisible:
yield node
def _parseViewBox(self, xml):
if "viewBox" in xml.attrib:
return [parseNumber(x) for x in xml.attrib["viewBox"].split(' ')]
else:
return [0, 0, 0, 0]
class SVGGroup(SVGNode):
def __init__(self, xml, parent=None):
super(SVGGroup, self).__init__(xml, parent)
TRAILING_ZEROS_RE = re.compile(r'\.?0+$')
def removeTrailingZeros(s):
result = TRAILING_ZEROS_RE.sub('', s) if '.' in s else s
# return result if '.' in result else result + '.0'
return result
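# A few illustrative values (annotation, not in the original source):
#   removeTrailingZeros('12.500') -> '12.5'
#   removeTrailingZeros('12.000') -> '12'
#   removeTrailingZeros('1200')   -> '1200'  (untouched: no decimal point)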
class SVGPath(SVGNode):
def __init__(self, xml, parent=None):
super(SVGPath, self).__init__(xml, parent, True)
tag = re.sub(r'\{[^\}]+\}', '', xml.tag)
d = xml.attrib.get("d")
if tag == "rect":
d = self._rectToPath(xml)
elif tag == "ellipse":
d = self._ellipseToPath(xml)
elif tag == "circle":
d = self._ellipseToPath(xml)
if d is None:
raise NotImplementedError("'%s' path converter is not yet implemented" % tag)
self.commands = self._parseCommands(d)
def _parseCommands(self, d):
COMMANDS_RE = re.compile(r"([MmZzLlHhVvCcSsQqTtAa])")
tokens = [x for x in COMMANDS_RE.split(d) if len(x) > 0]
i = 0
commands = []
while i < len(tokens):
command = tokens[i]
coordinates = tokens[i+1] if i+1 < len(tokens) else None
if command.islower():
raise TypeError("%s is relative." % command)
if command == 'M':
i += 2
commands.append(MoveTo.fromSVGString(coordinates).withTransform(self.transform))
elif command == 'L':
i += 2
commands.append(LineTo.fromSVGString(coordinates).withTransform(self.transform))
elif command == 'C':
i += 2
commands.append(CurveTo.fromSVGString(coordinates).withTransform(self.transform))
elif command == 'Z':
i += 1
commands.append(ClosePath())
else:
raise NotImplementedError("Command %s is not implemented" % command)
return commands
def _rectToPath(self, xml):
x = float(xml.attrib.get("x", 0.0))
y = float(xml.attrib.get("y", 0.0))
width = float(xml.attrib.get("width", 0.0))
height = float(xml.attrib.get("height", 0.0))
points = [x, y, x + width, y, x + width, y + height, x, y + height]
a, b, c, d, e, f, g, h = [removeTrailingZeros(str(n)) for n in points]
return "M%s,%sL%s,%sL%s,%sL%s,%sZ" % (a, b, c, d, e, f, g, h)
def _ellipseToPath(self, xml):
cx = float(xml.attrib.get("cx", 0.0))
cy = float(xml.attrib.get("cy", 0.0))
rx = float(xml.attrib.get("rx", xml.attrib.get("r", 0.0)))
ry = float(xml.attrib.get("ry", xml.attrib.get("r", 0.0)))
K = 0.5522847498307935
cdX, cdY = rx * K, ry * K
points1 = [cx, cy - ry]
points2 = [cx + rx, cy, cx + cdX, cy - ry, cx + rx, cy - cdY]
points3 = [cx, cy + ry, cx + rx, cy + cdY, cx + cdX, cy + ry]
points4 = [cx - rx, cy, cx - cdX, cy + ry, cx - rx, cy + cdY]
points5 = [cx, cy - ry, cx - rx, cy - cdY, cx - cdX, cy - ry]
px, py = [removeTrailingZeros(str(n)) for n in points1]
p1x, p1y, p1x1, p1y1, p1x2, p1y2 = [removeTrailingZeros(str(n)) for n in points2]
p2x, p2y, p2x1, p2y1, p2x2, p2y2 = [removeTrailingZeros(str(n)) for n in points3]
p3x, p3y, p3x1, p3y1, p3x2, p3y2 = [removeTrailingZeros(str(n)) for n in points4]
p4x, p4y, p4x1, p4y1, p4x2, p4y2 = [removeTrailingZeros(str(n)) for n in points5]
return "M{},{}C{},{} {},{} {},{}".format(px, py, p1x1, p1y1, p1x2, p1y2, p1x, p1y) + \
"C{},{} {},{} {},{}".format(p2x1, p2y1, p2x2, p2y2, p2x, p2y) + \
"C{},{} {},{} {},{}".format(p3x1, p3y1, p3x2, p3y2, p3x, p3y) + \
"C{},{} {},{} {},{}Z".format(p4x1, p4y1, p4x2, p4y2, p4x, p4y)
class MoveTo(object):
def __init__(self, x, y):
super(MoveTo, self).__init__()
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "(M %f,%f)" % (self.x, self.y)
@classmethod
def fromSVGString(cls, string):
xy = string.split(',')
return cls(xy[0], xy[1])
def withTransform(self, transform):
p = P(self.x, self.y).applyTransform(transform)
return MoveTo(p.x, p.y)
class LineTo(object):
def __init__(self, x, y):
super(LineTo, self).__init__()
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "(L %f,%f)" % (self.x, self.y)
@classmethod
def fromSVGString(cls, string):
xy = string.split(',')
return cls(xy[0], xy[1])
def withTransform(self, transform):
p = P(self.x, self.y).applyTransform(transform)
return LineTo(p.x, p.y)
class CurveTo(object):
def __init__(self, x, y, x1, y1, x2, y2):
super(CurveTo, self).__init__()
self.x = float(x)
self.y = float(y)
self.x1 = float(x1)
self.y1 = float(y1)
self.x2 = float(x2)
self.y2 = float(y2)
def __repr__(self):
return "(C %f,%f %f,%f %f,%f)" % (self.x1, self.y1, self.x2, self.y2, self.x, self.y)
@classmethod
def fromSVGString(cls, string):
x1y1x2y2xy = [x.split(',') for x in string.split(' ')]
x1 = x1y1x2y2xy[0][0]
y1 = x1y1x2y2xy[0][1]
x2 = x1y1x2y2xy[1][0]
y2 = x1y1x2y2xy[1][1]
x = x1y1x2y2xy[2][0]
y = x1y1x2y2xy[2][1]
return cls(x, y, x1, y1, x2, y2)
def withTransform(self, transform):
p = P(self.x, self.y).applyTransform(transform)
p1 = P(self.x1, self.y1).applyTransform(transform)
p2 = P(self.x2, self.y2).applyTransform(transform)
return CurveTo(p.x, p.y, p1.x, p1.y, p2.x, p2.y)
class ClosePath(object):
def __init__(self):
super(ClosePath, self).__init__()
def __repr__(self):
return "(Z)"
class P(object):
def __init__(self, x=0.0, y=0.0, z=1.0):
self.x, self.y, self.z = float(x), float(y), float(z)
def __repr__(self):
return "(%f, %f, %f)" % (self.x, self.y, self.z)
def applyTransform(self, m):
x = m.a*self.x + m.c*self.y + m.e*self.z
y = m.b*self.x + m.d*self.y + m.f*self.z
z = m[2][0]*self.x + m[2][1]*self.y + m[2][2]*self.z
return P(x, y, z)
def normalizeForSize(self, maxWidth, maxHeight, maxDepth=1.0):
self.x /= maxWidth
self.y /= maxHeight
self.z /= maxDepth
class M(object):
def __init__(self, array=None, **kwargs):
super(M, self).__init__()
self.array = array or [[1.0, 0.0, 0.0],[0.0, 1.0, 0.0],[0.0, 0.0, 1.0]]
if array is None and len(kwargs) > 0:
self.a = kwargs.get("a", 1.0)
self.b = kwargs.get("b", 0.0)
self.c = kwargs.get("c", 0.0)
self.d = kwargs.get("d", 1.0)
self.e = kwargs.get("e", 0.0)
self.f = kwargs.get("f", 0.0)
for r in range(0, 3):
for c in range(0, 3):
self.array[r][c] = float(self.array[r][c])
@property
def isIdentity(self):
for r in range(0, 3):
for c in range(0, 3):
if r == c and self.array[r][c] != 1.0:
return False
if r != c and self.array[r][c] != 0.0:
return False
return True
@classmethod
def withComponents(cls, *args):
if len(args) != 6:
raise ValueError("Should these 6 components: a, b, c, d, e, f")
m = cls(a=args[0], b=args[1], c=args[2], d=args[3], e=args[4], f=args[5])
return m
def __repr__(self):
maxLen = 0
arrayStr = []
for r in range(0, 3):
arrayStr.append(["", "", ""])
for c in range(0, 3):
s = str(self[r][c])
sLen = len(s)
if sLen > maxLen:
maxLen = sLen
arrayStr[r][c] = s
f = '{:>' + str(maxLen) + 's} {:>' + str(maxLen) + 's} {:>' + str(maxLen) + 's}'
return "\n".join([f.format(r[0], r[1], r[2]) for r in arrayStr])
@property
def a(self): return self.array[0][0]
@a.setter
def a(self, a): self.array[0][0] = float(a)
@property
def b(self): return self.array[1][0]
@b.setter
def b(self, b): self.array[1][0] = float(b)
@property
def c(self): return self.array[0][1]
@c.setter
def c(self, c): self.array[0][1] = float(c)
@property
def d(self): return self.array[1][1]
@d.setter
def d(self, d): self.array[1][1] = float(d)
@property
def e(self): return self.array[0][2]
@e.setter
def e(self, e): self.array[0][2] = float(e)
@property
def f(self): return self.array[1][2]
@f.setter
def f(self, f): self.array[1][2] = float(f)
def __getitem__(self, index):
return self.array[index]
def __len__(self):
return len(self.array)
def __mul__(self, other):
rows_A = len(self)
cols_A = len(self[0])
rows_B = len(other)
cols_B = len(other[0])
if cols_A != rows_B:
raise ValueError("Cannot multiply the two matrices: incorrect dimensions.")
C = [[0 for row in range(cols_B)] for col in range(rows_A)]
for i in range(rows_A):
for j in range(cols_B):
for k in range(cols_A):
C[i][j] += self[i][k] * other[k][j]
return M(C)
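# Composition sketch, not from the original source. Matrices follow the SVG
# layout [[a, c, e], [b, d, f], [0, 0, 1]], so `scale * translate` applies
# the translation first, then the scaling:
#
#   scale = M.withComponents(2., 0., 0., 2., 0., 0.)
#   translate = M.withComponents(1., 0., 0., 1., 10., 5.)
#   p = P(1., 1.).applyTransform(scale * translate)
#   # p.x == 22.0, p.y == 12.0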
|
mit
|
int19h/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Lib/encodings/mac_romanian.py
|
272
|
13661
|
""" Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-romanian',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\xb4' # 0xAB -> ACUTE ACCENT
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE
'\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
'\u221e' # 0xB0 -> INFINITY
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\xa5' # 0xB4 -> YEN SIGN
'\xb5' # 0xB5 -> MICRO SIGN
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u220f' # 0xB8 -> N-ARY PRODUCT
'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
'\u222b' # 0xBA -> INTEGRAL
'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
'\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE
'\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
'\xbf' # 0xC0 -> INVERTED QUESTION MARK
'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
'\u2248' # 0xC5 -> ALMOST EQUAL TO
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
'\u2044' # 0xDA -> FRACTION SLASH
'\u20ac' # 0xDB -> EURO SIGN
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
'\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
'\u2021' # 0xE0 -> DOUBLE DAGGER
'\xb7' # 0xE1 -> MIDDLE DOT
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u2030' # 0xE4 -> PER MILLE SIGN
'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\uf8ff' # 0xF0 -> Apple logo
'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u02dc' # 0xF7 -> SMALL TILDE
'\xaf' # 0xF8 -> MACRON
'\u02d8' # 0xF9 -> BREVE
'\u02d9' # 0xFA -> DOT ABOVE
'\u02da' # 0xFB -> RING ABOVE
'\xb8' # 0xFC -> CEDILLA
'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
'\u02db' # 0xFE -> OGONEK
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
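# Usage sketch (not part of the generated module): the charmap tables can be
# exercised directly, without registering the codec:
#
#   text, consumed = codecs.charmap_decode(b'\xaf', 'strict', decoding_table)
#   # text == '\u0218' (LATIN CAPITAL LETTER S WITH COMMA BELOW)
#   raw, consumed = codecs.charmap_encode('\u0218', 'strict', encoding_table)
#   # raw == b'\xaf'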
|
apache-2.0
|
nikolas/lettuce
|
tests/integration/lib/Django-1.2.5/django/contrib/formtools/utils.py
|
64
|
1261
|
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.utils.hashcompat import md5_constructor
from django.forms import BooleanField
def security_hash(request, form, *args):
"""
Calculates a security hash for the given Form instance.
This creates a list of the form field names/values in a deterministic
order, appends the SECRET_KEY setting, pickles the result, then takes an
md5 hash of that.
"""
data = []
for bf in form:
# Get the value from the form data. If the form allows empty values or
# hasn't changed, don't call clean(), to avoid triggering validation errors.
if form.empty_permitted and not form.has_changed():
value = bf.data or ''
else:
value = bf.field.clean(bf.data) or ''
if isinstance(value, basestring):
value = value.strip()
data.append((bf.name, value))
data.extend(args)
data.append(settings.SECRET_KEY)
# Use HIGHEST_PROTOCOL because it's the most efficient. It requires
# Python 2.3, but Django requires 2.4 anyway, so that's OK.
pickled = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)
return md5_constructor(pickled).hexdigest()
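# Usage sketch (the form class below is hypothetical, for illustration only;
# it requires settings.SECRET_KEY to be configured, and note that the request
# argument is accepted but unused by security_hash):
#
#   from django import forms
#
#   class ContactForm(forms.Form):
#       name = forms.CharField(required=False)
#
#   form = ContactForm({'name': 'Alice'})
#   token = security_hash(None, form)  # deterministic for identical form data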
|
gpl-3.0
|
dlazz/ansible
|
lib/ansible/modules/cloud/vmware/vmware_host_powermgmt_policy.py
|
43
|
8768
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Christian Kotte <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_powermgmt_policy
short_description: Manages the Power Management Policy of an ESXi host system
description:
- This module can be used to manage the Power Management Policy of ESXi host systems in a given vCenter infrastructure.
version_added: 2.8
author:
- Christian Kotte (@ckotte) <[email protected]>
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
policy:
description:
- Set the Power Management Policy of the host system.
choices: [ 'high-performance', 'balanced', 'low-power', 'custom' ]
default: 'balanced'
esxi_hostname:
description:
- Name of the host system to work with.
- This parameter is required if C(cluster_name) is not specified.
cluster_name:
description:
- Name of the cluster from which all host systems will be used.
- This parameter is required if C(esxi_hostname) is not specified.
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Set the Power Management Policy of a host system to high-performance
vmware_host_powermgmt_policy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_host }}'
policy: high-performance
validate_certs: no
delegate_to: localhost
- name: Set the Power Management Policy of all host systems from cluster to high-performance
vmware_host_powermgmt_policy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
policy: high-performance
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
result:
description: metadata about host system's Power Management Policy
returned: always
type: dict
sample: {
"changed": true,
"result": {
"esxi01": {
"changed": true,
"current_state": "high-performance",
"desired_state": "high-performance",
"msg": "Power policy changed",
"previous_state": "balanced"
}
}
}
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import PyVmomi, vmware_argument_spec
from ansible.module_utils._text import to_native
class VmwareHostPowerManagement(PyVmomi):
"""
Class to manage power management policy of an ESXi host system
"""
def __init__(self, module):
super(VmwareHostPowerManagement, self).__init__(module)
cluster_name = self.params.get('cluster_name')
esxi_host_name = self.params.get('esxi_hostname')
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
if not self.hosts:
self.module.fail_json(msg="Failed to find host system with given configuration.")
def ensure(self):
"""
Manage power management policy of an ESXi host system
"""
results = dict(changed=False, result=dict())
policy = self.params.get('policy')
host_change_list = []
power_policies = {
'high-performance': {
'key': 1,
'short_name': 'static'
},
'balanced': {
'key': 2,
'short_name': 'dynamic'
},
'low-power': {
'key': 3,
'short_name': 'low'
},
'custom': {
'key': 4,
'short_name': 'custom'
}
}
for host in self.hosts:
changed = False
results['result'][host.name] = dict(msg='')
power_system = host.configManager.powerSystem
# get current power policy
power_system_info = power_system.info
current_host_power_policy = power_system_info.currentPolicy
# the "name" and "description" parameters are pretty useless
# they store only strings containing "PowerPolicy.<shortName>.name" and "PowerPolicy.<shortName>.description"
if current_host_power_policy.shortName == "static":
current_policy = 'high-performance'
elif current_host_power_policy.shortName == "dynamic":
current_policy = 'balanced'
elif current_host_power_policy.shortName == "low":
current_policy = 'low-power'
elif current_host_power_policy.shortName == "custom":
current_policy = 'custom'
results['result'][host.name]['desired_state'] = policy
# Don't do anything if the power policy is already configured
if current_host_power_policy.key == power_policies[policy]['key']:
results['result'][host.name]['changed'] = changed
results['result'][host.name]['previous_state'] = current_policy
results['result'][host.name]['current_state'] = policy
results['result'][host.name]['msg'] = "Power policy is already configured"
else:
# get available power policies and check if policy is included
supported_policy = False
power_system_capability = power_system.capability
available_host_power_policies = power_system_capability.availablePolicy
for available_policy in available_host_power_policies:
if available_policy.shortName == power_policies[policy]['short_name']:
supported_policy = True
if supported_policy:
if not self.module.check_mode:
try:
power_system.ConfigurePowerPolicy(key=power_policies[policy]['key'])
changed = True
results['result'][host.name]['changed'] = True
results['result'][host.name]['msg'] = "Power policy changed"
except vmodl.fault.InvalidArgument:
self.module.fail_json(msg="Invalid power policy key provided for host '%s'" % host.name)
except vim.fault.HostConfigFault as host_config_fault:
self.module.fail_json(msg="Failed to configure power policy for host '%s': %s" %
(host.name, to_native(host_config_fault.msg)))
else:
changed = True
results['result'][host.name]['changed'] = True
results['result'][host.name]['msg'] = "Power policy will be changed"
results['result'][host.name]['previous_state'] = current_policy
results['result'][host.name]['current_state'] = policy
else:
changed = False
results['result'][host.name]['changed'] = changed
results['result'][host.name]['previous_state'] = current_policy
results['result'][host.name]['current_state'] = current_policy
self.module.fail_json(msg="Power policy '%s' isn't supported for host '%s'" %
(policy, host.name))
host_change_list.append(changed)
if any(host_change_list):
results['changed'] = True
self.module.exit_json(**results)
def main():
"""
Main
"""
argument_spec = vmware_argument_spec()
argument_spec.update(
policy=dict(type='str', default='balanced',
choices=['high-performance', 'balanced', 'low-power', 'custom']),
esxi_hostname=dict(type='str', required=False),
cluster_name=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True
)
host_power_management = VmwareHostPowerManagement(module)
host_power_management.ensure()
if __name__ == '__main__':
main()
|
gpl-3.0
|
ansible/ansible
|
examples/scripts/my_test_info.py
|
29
|
3120
|
#!/usr/bin/python
# Copyright: (c) 2020, Your Name <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: my_test_info
short_description: This is my test info module
version_added: "1.0.0"
description: This is my longer description explaining my test info module.
options:
name:
description: This is the message to send to the test module.
required: true
type: str
author:
- Your Name (@yourGitHubHandle)
'''
EXAMPLES = r'''
# Pass in a message
- name: Test with a message
my_namespace.my_collection.my_test_info:
name: hello world
'''
RETURN = r'''
# These are examples of possible return values, and in general should use other names for return values.
original_message:
description: The original name param that was passed in.
type: str
returned: always
sample: 'hello world'
message:
description: The output message that the test module generates.
type: str
returned: always
sample: 'goodbye'
my_useful_info:
description: The dictionary containing information about your system.
type: dict
returned: always
sample: {
'foo': 'bar',
'answer': 42,
}
'''
from ansible.module_utils.basic import AnsibleModule
def run_module():
# define available arguments/parameters a user can pass to the module
module_args = dict(
name=dict(type='str', required=True),
)
# seed the result dict in the object
# we primarily care about changed and state
# changed is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
original_message='',
message='',
my_useful_info={},
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
module.exit_json(**result)
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
result['original_message'] = module.params['name']
result['message'] = 'goodbye'
result['my_useful_info'] = {
'foo': 'bar',
'answer': 42,
}
# in the event of a successful module execution, you will want to call
# AnsibleModule.exit_json(), passing in the key/value results
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
|
gpl-3.0
|
GiggleLiu/nrg_mapping
|
nrgmap/discmodel.py
|
1
|
5812
|
'''
The Discretized model, or the sun model.
'''
from numpy import *
from scipy import sparse as sps
import pdb
__all__=['DiscModel','load_discmodel']
class DiscModel(object):
'''
Discrete model class.
Construct:
DiscModel((Elist_neg,Elist_pos),(Tlist_neg,Tlist_pos),z=1.)
z can be a single float or an array of floats, each >0 and <=1.
Attributes:
:Elist_neg/Elist_pos/Elist(readonly)/Tlist_neg/Tlist_pos/Tlist(readonly): An array of on-site energies and hopping terms.
* The shape of Elist_neg/Tlist_neg is (N_neg,nz,nband,nband)
* The shape of Elist_pos/Tlist_pos is (N_pos,nz,nband,nband)
* The shape of Elist/Tlist is (N_pos+N_neg,nz,nband,nband)
:z: The twisting parameters.
:nz: The number of z-numbers(readonly)
:nband: The number of bands(readonly)
:N_neg/N_pos/N: The number of intervals for negative/positive/total band(readonly)
:is_scalar: Is a single band scalar model if True(readonly)
'''
def __init__(self,Elists,Tlists,z=1.):
if ndim(z)==0:
self.z=array([z])
elif ndim(z)==1:
self.z=array(z)
else:
raise Exception('z must be a 1D array or a number!')
if any(self.z > 1.) or any(self.z <= 0.):
raise Exception('z must be greater than 0 and less than or equal to 1!')
assert(shape(Elists[0])==shape(Tlists[0]))
assert(shape(Elists[1])==shape(Tlists[1]))
assert(ndim(Elists[0])==2 or ndim(Elists[0])==4) #when ndim=2, the last 2 dimensions are ignored(scalar).
self.Tlist_neg,self.Tlist_pos=Tlists
self.Elist_neg,self.Elist_pos=Elists
@property
def nz(self):
'''number of twisting parameters.'''
return len(self.z)
@property
def N_pos(self):
'''The total number of intervals in the positive branch'''
return len(self.Tlist_pos)
@property
def N_neg(self):
'''The total number of intervals in the negative branch'''
return len(self.Tlist_neg)
@property
def N(self):
'''The total number of intervals'''
return self.N_neg+self.N_pos
@property
def nband(self):
'''number of bands.'''
if self.is_scalar:
return 1
return self.Elist_pos.shape[-1]
@property
def is_scalar(self):
'''is a scalar model (no matrix representation of on-site energies and hopping) or not.'''
return ndim(self.Elist_pos)==2
@property
def Elist(self):
'''The list of on-site energies.'''
return concatenate([self.Elist_neg[::-1],self.Elist_pos],axis=0)
@property
def Tlist(self):
'''The list of hopping terms.'''
return concatenate([self.Tlist_neg[::-1],self.Tlist_pos],axis=0)
def get_H0(self,e0,iz):
'''Get the hamiltonian.'''
N=self.N+1
mat=ndarray((N,N),dtype='O')
elist,tlist=self.Elist[:,iz],self.Tlist[:,iz]
# fill in the matrix entries
mat[0,0]=e0
for i in range(1,N):
ti=tlist[i-1]
mat[i,i]=elist[i-1]
mat[0,i]=ti.T.conj()
mat[i,0]=ti
return sps.bmat(mat).toarray()
def save(self,file_prefix):
'''
Save data.
Parameters:
:file_prefix: The target filename prefix.
**Note:**
For the scalar model mapped from SingleBandDiscHandler, with z=[0.3,0.7] and 2 sites for each branch.
The data file `negfile` (or `posfile`) looks like:
| E1(z=0.3)
| E1(z=0.7)
| E2(z=0.3)
| E2(z=0.7)
| T1(z=0.3)
| T1(z=0.7)
| T2(z=0.3)
| T2(z=0.7)
However, for the multi-band model, the parameters are allowed to take imaginary parts,
Now, the data file for a two band model looks like:
| E1[0,0].real, E1[0,0].imag, E1[0,1].real, E1[0,1].imag, E1[1,0].real, E1[1,0].imag, E1[1,1].real, E1[1,1].imag #z=0.3
| ...
It will take 8 columns to store each matrix element.
'''
N_neg,N_pos=self.N_neg,self.N_pos
nband=self.nband
nz=self.nz
zfile='%s.z.dat'%(file_prefix)
negfile='%s.neg.dat'%(file_prefix)
posfile='%s.pos.dat'%(file_prefix)
if self.is_scalar:
negdata=concatenate([self.Elist_neg,self.Tlist_neg]).real.reshape([2*N_neg*nz])
posdata=concatenate([self.Elist_pos,self.Tlist_pos]).real.reshape([2*N_pos*nz])
else:
negdata=complex128(concatenate([self.Elist_neg,self.Tlist_neg])).view('float64').reshape([2*N_neg*nz,nband**2*2])
posdata=complex128(concatenate([self.Elist_pos,self.Tlist_pos])).view('float64').reshape([2*N_pos*nz,nband**2*2])
savetxt(negfile,negdata)
savetxt(posfile,posdata)
savetxt(zfile,self.z)
def load_discmodel(file_prefix):
'''
Load specific data.
Parameters:
:file_prefix: The target filename prefix.
Return:
A <DiscModel> instance.
'''
zfile='%s.z.dat'%(file_prefix)
negfile='%s.neg.dat'%(file_prefix)
posfile='%s.pos.dat'%(file_prefix)
z=loadtxt(zfile)
nz=len(atleast_1d(z))
negdata=loadtxt(negfile)
posdata=loadtxt(posfile)
if ndim(negdata)==1:
negdata=negdata.reshape([-1,nz])
posdata=posdata.reshape([-1,nz])
else:
#the matrix version contains complex numbers.
nband=sqrt(negdata.shape[1]/2).astype('int32')
negdata=negdata.view('complex128').reshape([-1,nz,nband,nband])
posdata=posdata.view('complex128').reshape([-1,nz,nband,nband])
Elist_neg,Tlist_neg=split(negdata,2)
Elist_pos,Tlist_pos=split(posdata,2)
return DiscModel(Elists=(Elist_neg,Elist_pos),Tlists=(Tlist_neg,Tlist_pos),z=z)
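# Round-trip sketch (not part of the original module); shapes follow the
# scalar case documented above, with 2 sites per branch and a single z:
#
#   E_neg = random.random([2, 1]); E_pos = random.random([2, 1])
#   T_neg = random.random([2, 1]); T_pos = random.random([2, 1])
#   model = DiscModel((E_neg, E_pos), (T_neg, T_pos), z=1.)
#   model.save('mymodel')              # writes mymodel.{z,neg,pos}.dat
#   model2 = load_discmodel('mymodel')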
|
mit
|
salomon1184/bite-project
|
server/models/compat/run_tester_map.py
|
17
|
3398
|
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RunTesterMap model and related logic.
RunTesterMap stores the relationship between a CompatRun
and a User. It tracks the runs a user is subscribed to.
"""
__author__ = '[email protected] (Alexis O. Torres)'
from google.appengine.api import memcache
from google.appengine.ext import db
from models.compat import run as compat_run
class RunTesterMap(db.Model):
"""Tracks the relationship between a CompatRun and a User."""
run = db.ReferenceProperty(compat_run.CompatRun)
user = db.UserProperty(required=True)
def GetMappingKeyName(run, user):
"""Returns a str used to uniquely identify a mapping."""
return 'RunTesterMap_%s_%s' % (run.key().name(), str(user.user_id()))
def GetMappingKey(run, user):
"""Returns the unique db.Key object for the given a run and user."""
return db.Key.from_path('RunTesterMap', GetMappingKeyName(run, user))
def AddMapping(run, user):
"""Adds a new mapping between a given run and a user."""
def _Txn():
mapping = RunTesterMap(key_name=GetMappingKeyName(run, user),
user=user, run=run)
mapping.put()
# Update memcache mappings for user.
memcache.delete(GetMappingsForTesterKeyName(user))
return mapping
return db.run_in_transaction(_Txn)
def RemoveMapping(run, user):
"""Removes given mapping between run and user."""
def _Txn():
db.delete(GetMappingKey(run, user))
# Invalidate memcache mappings for user.
memcache.delete(GetMappingsForTesterKeyName(user))
db.run_in_transaction(_Txn)
def GetMappingsForTesterKeyName(user):
"""Returns a str used to uniquely identify mappings for a given user.."""
return 'RunTesterMap_Tester_%s' % str(user.user_id())
def _PrefetchRefprops(entities, *props):
"""Prefetches reference properties on the given list of entities."""
fields = [(entity, prop) for entity in entities for prop in props]
ref_keys = [prop.get_value_for_datastore(x) for x, prop in fields]
ref_entities = dict((x.key(), x) for x in db.get(set(ref_keys)))
for (entity, prop), ref_key in zip(fields, ref_keys):
prop.__set__(entity, ref_entities[ref_key])
return entities
def GetMappingsForTester(user, prefetch_ref_properties=True):
"""Returns a list of mappings associated with the given user.."""
cache_key = GetMappingsForTesterKeyName(user)
mappings = None #memcache.get(cache_key)
if mappings is None:
runs = compat_run.GetRuns()
keys = [GetMappingKey(run, user) for run in runs]
mappings = RunTesterMap.get(keys)
if mappings:
# Remove keys not found, e.g. [None, None, None] -> []
mappings = filter(lambda item: item is not None, mappings)
memcache.set(cache_key, mappings)
if prefetch_ref_properties:
return _PrefetchRefprops(mappings, RunTesterMap.run)
else:
return mappings
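# Usage sketch (not from the original module; assumes an App Engine request
# context with a signed-in user and at least one stored CompatRun):
#
#   from google.appengine.api import users
#   user = users.get_current_user()
#   run = compat_run.GetRuns()[0]
#   AddMapping(run, user)
#   subscriptions = GetMappingsForTester(user)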
|
apache-2.0
|
cleverhans-lab/cleverhans
|
cleverhans/experimental/certification/tests/dual_formulation_test.py
|
2
|
8080
|
"""Tests for cleverhans.experimental.certification.dual_formulation."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
from cleverhans.experimental.certification import dual_formulation
from cleverhans.experimental.certification import nn
class DualFormulationTest(unittest.TestCase):
def test_init(self):
# Function to test initialization of dual formulation class.
net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
net_biases = [
np.transpose(np.matrix([0, 0, 0])),
np.transpose(np.matrix([0, 0])),
]
net_layer_types = ["ff_relu", "ff"]
nn_params1 = nn.NeuralNetwork(net_weights, net_biases, net_layer_types)
test_input = np.transpose(np.matrix([0, 0]))
true_class = 0
adv_class = 1
input_minval = 0
input_maxval = 0
epsilon = 0.1
three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
lambda_pos = [two_dim_tensor, three_dim_tensor]
lambda_neg = lambda_pos
lambda_quad = lambda_pos
lambda_lu = lambda_pos
nu = scalar
dual_var = {
"lambda_pos": lambda_pos,
"lambda_neg": lambda_neg,
"lambda_quad": lambda_quad,
"lambda_lu": lambda_lu,
"nu": nu,
}
with tf.Session() as sess:
dual_formulation_object = dual_formulation.DualFormulation(
sess,
dual_var,
nn_params1,
test_input,
true_class,
adv_class,
input_minval,
input_maxval,
epsilon,
)
self.assertIsNotNone(dual_formulation_object)
def test_set_differentiable_objective(self):
# Function to test the function that sets the differentiable objective.
net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
net_biases = [
np.transpose(np.matrix([0, 0, 0])),
np.transpose(np.matrix([0, 0])),
]
net_layer_types = ["ff_relu", "ff"]
nn_params1 = nn.NeuralNetwork(net_weights, net_biases, net_layer_types)
test_input = np.transpose(np.matrix([0, 0]))
true_class = 0
adv_class = 1
input_minval = 0
input_maxval = 0
epsilon = 0.1
three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
lambda_pos = [two_dim_tensor, three_dim_tensor]
lambda_neg = lambda_pos
lambda_quad = lambda_pos
lambda_lu = lambda_pos
nu = scalar
dual_var = {
"lambda_pos": lambda_pos,
"lambda_neg": lambda_neg,
"lambda_quad": lambda_quad,
"lambda_lu": lambda_lu,
"nu": nu,
}
with tf.Session() as sess:
dual_formulation_object = dual_formulation.DualFormulation(
sess,
dual_var,
nn_params1,
test_input,
true_class,
adv_class,
input_minval,
input_maxval,
epsilon,
)
dual_formulation_object.set_differentiable_objective()
self.assertEqual(dual_formulation_object.scalar_f.shape.as_list(), [1])
self.assertEqual(
dual_formulation_object.unconstrained_objective.shape.as_list(), [1, 1]
)
self.assertEqual(dual_formulation_object.vector_g.shape.as_list(), [5, 1])
def test_get_full_psd_matrix(self):
# Function to test product with PSD matrix.
net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
net_biases = [
np.transpose(np.matrix([0, 0, 0])),
np.transpose(np.matrix([0, 0])),
]
net_layer_types = ["ff_relu", "ff"]
nn_params1 = nn.NeuralNetwork(net_weights, net_biases, net_layer_types)
test_input = np.transpose(np.matrix([0, 0]))
true_class = 0
adv_class = 1
input_minval = 0
input_maxval = 0
epsilon = 0.1
three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
lambda_pos = [two_dim_tensor, three_dim_tensor]
lambda_neg = lambda_pos
lambda_quad = lambda_pos
lambda_lu = lambda_pos
nu = scalar
dual_var = {
"lambda_pos": lambda_pos,
"lambda_neg": lambda_neg,
"lambda_quad": lambda_quad,
"lambda_lu": lambda_lu,
"nu": nu,
}
with tf.Session() as sess:
dual_formulation_object = dual_formulation.DualFormulation(
sess,
dual_var,
nn_params1,
test_input,
true_class,
adv_class,
input_minval,
input_maxval,
epsilon,
)
matrix_h, matrix_m = dual_formulation_object.get_full_psd_matrix()
self.assertEqual(matrix_h.shape.as_list(), [5, 5])
self.assertEqual(matrix_m.shape.as_list(), [6, 6])
def test_get_psd_product(self):
# Function to test implicit product with PSD matrix.
net_weights = [[[2, 2], [3, 3], [4, 4]], [[1, 1, 1], [-1, -1, -1]]]
net_biases = [
np.transpose(np.matrix([0, 0, 0])),
np.transpose(np.matrix([0, 0])),
]
net_layer_types = ["ff_relu", "ff"]
nn_params1 = nn.NeuralNetwork(net_weights, net_biases, net_layer_types)
test_input = np.transpose(np.matrix([0, 0]))
true_class = 0
adv_class = 1
input_minval = 0
input_maxval = 0
epsilon = 0.1
three_dim_tensor = tf.random_uniform(shape=(3, 1), dtype=tf.float32)
two_dim_tensor = tf.random_uniform(shape=(2, 1), dtype=tf.float32)
scalar = tf.random_uniform(shape=(1, 1), dtype=tf.float32)
lambda_pos = [two_dim_tensor, three_dim_tensor]
lambda_neg = lambda_pos
lambda_quad = lambda_pos
lambda_lu = lambda_pos
nu = scalar
dual_var = {
"lambda_pos": lambda_pos,
"lambda_neg": lambda_neg,
"lambda_quad": lambda_quad,
"lambda_lu": lambda_lu,
"nu": nu,
}
with tf.Session() as sess:
dual_formulation_object = dual_formulation.DualFormulation(
sess,
dual_var,
nn_params1,
test_input,
true_class,
adv_class,
input_minval,
input_maxval,
epsilon,
)
_, matrix_m = dual_formulation_object.get_full_psd_matrix()
# Testing if the values match
six_dim_tensor = tf.random_uniform(shape=(6, 1), dtype=tf.float32)
implicit_product = dual_formulation_object.get_psd_product(six_dim_tensor)
explicit_product = tf.matmul(matrix_m, six_dim_tensor)
[implicit_product_value, explicit_product_value] = sess.run(
[implicit_product, explicit_product]
)
self.assertEqual(
np.shape(implicit_product_value), np.shape(explicit_product_value)
)
self.assertLess(
np.max(np.abs(implicit_product_value - explicit_product_value)), 1e-5
)
if __name__ == "__main__":
unittest.main()
|
mit
|
kntem/webdeposit
|
modules/webjournal/lib/webjournal_unit_tests.py
|
2
|
2782
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebJournal."""
__revision__ = \
"$Id$"
# pylint invenio/modules/webjournal/lib/webjournal_tests.py
from invenio.importutils import lazy_import
from invenio.testutils import make_test_suite, run_test_suite, InvenioTestCase
issue_is_later_than = lazy_import('invenio.webjournal:issue_is_later_than')
compare_issues = lazy_import('invenio.webjournal_utils:compare_issues')
class TestCompareIssues(InvenioTestCase):
"""Tests for comparing issues."""
def test_compare_issues(self):
"""webjournal - tests comparing issues"""
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), 0)
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(compare_issues(issue1, issue2), 1)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(compare_issues(issue1, issue2), -1)
def test_issue1_is_later_than(self):
"""webjournal - tests comparing issue1 is later than issue2 """
issue1 = '07/2009'
issue2 = '07/2008'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '07/2008'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '06/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), True)
issue1 = '06/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
issue1 = '07/2009'
issue2 = '07/2009'
self.assertEqual(issue_is_later_than(issue1, issue2), False)
TEST_SUITE = make_test_suite(TestCompareIssues)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
gpl-2.0
|
epandurski/django
|
tests/template_tests/test_engine.py
|
199
|
3971
|
import os
from django.template import Context
from django.template.engine import Engine
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from .utils import ROOT, TEMPLATE_DIR
OTHER_DIR = os.path.join(ROOT, 'other_templates')
@ignore_warnings(category=RemovedInDjango110Warning)
class DeprecatedRenderToStringTest(SimpleTestCase):
def setUp(self):
self.engine = Engine(
dirs=[TEMPLATE_DIR],
libraries={'custom': 'template_tests.templatetags.custom'},
)
def test_basic_context(self):
self.assertEqual(
self.engine.render_to_string('test_context.html', {'obj': 'test'}),
'obj:test\n',
)
def test_existing_context_kept_clean(self):
context = Context({'obj': 'before'})
output = self.engine.render_to_string(
'test_context.html', {'obj': 'after'}, context_instance=context,
)
self.assertEqual(output, 'obj:after\n')
self.assertEqual(context['obj'], 'before')
def test_no_empty_dict_pushed_to_stack(self):
"""
#21741 -- An empty dict should not be pushed to the context stack when
render_to_string is called without a context argument.
"""
# The stack should have a length of 1, corresponding to the builtins
self.assertEqual(
'1',
self.engine.render_to_string('test_context_stack.html').strip(),
)
self.assertEqual(
'1',
self.engine.render_to_string(
'test_context_stack.html',
context_instance=Context()
).strip(),
)
class LoaderTests(SimpleTestCase):
def test_origin(self):
engine = Engine(dirs=[TEMPLATE_DIR], debug=True)
template = engine.get_template('index.html')
self.assertEqual(template.origin.template_name, 'index.html')
def test_loader_priority(self):
"""
#21460 -- Check that the order of template loader works.
"""
loaders = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
def test_cached_loader_priority(self):
"""
Check that the order of template loader works. Refs #21460.
"""
loaders = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
engine = Engine(dirs=[OTHER_DIR, TEMPLATE_DIR], loaders=loaders)
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
template = engine.get_template('priority/foo.html')
self.assertEqual(template.render(Context()), 'priority\n')
@ignore_warnings(category=RemovedInDjango110Warning)
class TemplateDirsOverrideTests(SimpleTestCase):
DIRS = ((OTHER_DIR, ), [OTHER_DIR])
def setUp(self):
self.engine = Engine()
def test_render_to_string(self):
for dirs in self.DIRS:
self.assertEqual(
self.engine.render_to_string('test_dirs.html', dirs=dirs),
'spam eggs\n',
)
def test_get_template(self):
for dirs in self.DIRS:
template = self.engine.get_template('test_dirs.html', dirs=dirs)
self.assertEqual(template.render(Context()), 'spam eggs\n')
def test_select_template(self):
for dirs in self.DIRS:
template = self.engine.select_template(['test_dirs.html'], dirs=dirs)
self.assertEqual(template.render(Context()), 'spam eggs\n')
|
bsd-3-clause
|
eventql/eventql
|
deps/3rdparty/spidermonkey/mozjs/python/pyasn1/test/codec/cer/test_decoder.py
|
51
|
1126
|
from pyasn1.type import univ
from pyasn1.codec.cer import decoder
from pyasn1.compat.octets import ints2octs, str2octs, null
from pyasn1.error import PyAsn1Error
from sys import version_info
if version_info[0:2] < (2, 7) or \
version_info[0:2] in ( (3, 0), (3, 1) ):
try:
import unittest2 as unittest
except ImportError:
import unittest
else:
import unittest
class BooleanDecoderTestCase(unittest.TestCase):
def testTrue(self):
assert decoder.decode(ints2octs((1, 1, 255))) == (1, null)
def testFalse(self):
assert decoder.decode(ints2octs((1, 1, 0))) == (0, null)
class OctetStringDecoderTestCase(unittest.TestCase):
def testShortMode(self):
assert decoder.decode(
ints2octs((4, 15, 81, 117, 105, 99, 107, 32, 98, 114, 111, 119, 110, 32, 102, 111, 120)),
) == (str2octs('Quick brown fox'), null)
def testLongMode(self):
assert decoder.decode(
ints2octs((36, 128, 4, 130, 3, 232) + (81,)*1000 + (4, 1, 81, 0, 0))
) == (str2octs('Q'*1001), null)
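        # The octets above encode an indefinite-length (0x80) constructed
        # OCTET STRING (tag 0x24): a 1000-byte chunk (04 82 03 E8), a final
        # 1-byte chunk (04 01 51), then the end-of-contents octets (00 00),
        # which is how CER segments payloads longer than 1000 bytes.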
if __name__ == '__main__': unittest.main()
|
agpl-3.0
|
wweiradio/django
|
tests/many_to_one_null/tests.py
|
142
|
6484
|
from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Car, Driver, Reporter
class ManyToOneNullTests(TestCase):
def setUp(self):
# Create a Reporter.
self.r = Reporter(name='John Smith')
self.r.save()
# Create an Article.
self.a = Article(headline="First", reporter=self.r)
self.a.save()
# Create an Article via the Reporter object.
self.a2 = self.r.article_set.create(headline="Second")
# Create an Article with no Reporter by passing "reporter=None".
self.a3 = Article(headline="Third", reporter=None)
self.a3.save()
# Create another article and reporter
self.r2 = Reporter(name='Paul Jones')
self.r2.save()
self.a4 = self.r2.article_set.create(headline='Fourth')
def test_get_related(self):
self.assertEqual(self.a.reporter.id, self.r.id)
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
def test_created_via_related_set(self):
self.assertEqual(self.a2.reporter.id, self.r.id)
def test_related_set(self):
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'),
['<Article: First>'])
self.assertEqual(self.r.article_set.count(), 2)
def test_created_without_related(self):
self.assertEqual(self.a3.reporter, None)
# Need to reget a3 to refresh the cache
a3 = Article.objects.get(pk=self.a3.pk)
self.assertRaises(AttributeError, getattr, a3.reporter, 'id')
# Accessing an article's 'reporter' attribute returns None
# if the reporter is set to None.
self.assertEqual(a3.reporter, None)
# To retrieve the articles with no reporters set, use "reporter__isnull=True".
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
# We can achieve the same thing by filtering for the case where the
# reporter is None.
self.assertQuerysetEqual(Article.objects.filter(reporter=None),
['<Article: Third>'])
# Set the reporter for the Third article
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.r.article_set.add(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>', '<Article: Third>'])
# Remove an article from the set, and check that it was removed.
self.r.article_set.remove(a3)
self.assertQuerysetEqual(self.r.article_set.all(),
['<Article: First>', '<Article: Second>'])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Third>'])
def test_remove_from_wrong_set(self):
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
# Try to remove a4 from a set it does not belong to
self.assertRaises(Reporter.DoesNotExist, self.r.article_set.remove, self.a4)
self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
def test_set(self):
# Use manager.set() to allocate ForeignKey. Null is legal, so existing
# members of the set that are not in the assignment set are set to null.
self.r2.article_set.set([self.a2, self.a3])
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Second>', '<Article: Third>'])
# Use manager.set(clear=True)
self.r2.article_set.set([self.a3, self.a4], clear=True)
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Fourth>', '<Article: Third>'])
# Clear the rest of the set
self.r2.article_set.set([])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: Fourth>', '<Article: Second>', '<Article: Third>'])
def test_assign_clear_related_set(self):
# Use descriptor assignment to allocate ForeignKey. Null is legal, so
# existing members of the set that are not in the assignment set are
# set to null.
self.r2.article_set = [self.a2, self.a3]
self.assertQuerysetEqual(self.r2.article_set.all(),
['<Article: Second>', '<Article: Third>'])
# Clear the rest of the set
self.r.article_set.clear()
self.assertQuerysetEqual(self.r.article_set.all(), [])
self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
['<Article: First>', '<Article: Fourth>'])
def test_assign_with_queryset(self):
# Ensure that querysets used in reverse FK assignments are pre-evaluated
# so their value isn't affected by the clearing operation in
# ForeignRelatedObjectsDescriptor.__set__. Refs #19816.
self.r2.article_set = [self.a2, self.a3]
qs = self.r2.article_set.filter(headline="Second")
self.r2.article_set = qs
self.assertEqual(1, self.r2.article_set.count())
self.assertEqual(1, qs.count())
def test_add_efficiency(self):
r = Reporter.objects.create()
articles = []
for _ in range(3):
articles.append(Article.objects.create())
with self.assertNumQueries(1):
r.article_set.add(*articles)
self.assertEqual(r.article_set.count(), 3)
def test_clear_efficiency(self):
r = Reporter.objects.create()
for _ in range(3):
r.article_set.create()
with self.assertNumQueries(1):
r.article_set.clear()
self.assertEqual(r.article_set.count(), 0)
def test_related_null_to_field(self):
c1 = Car.objects.create()
d1 = Driver.objects.create()
self.assertIs(d1.car, None)
with self.assertNumQueries(0):
self.assertEqual(list(c1.drivers.all()), [])
|
bsd-3-clause
|
JonasThomas/free-cad
|
src/Mod/Cam/InitGui.py
|
1
|
2807
|
# Cam gui init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel ([email protected]) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU General Public License (GPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
class CamWorkbench ( Workbench ):
"Cam workbench object"
Icon = """
/* XPM */
static const char *Cam_Box[]={
"16 16 3 1",
". c None",
"# c #000000",
"a c #c6c642",
"................",
".......#######..",
"......#aaaaa##..",
".....#aaaaa###..",
"....#aaaaa##a#..",
"...#aaaaa##aa#..",
"..#aaaaa##aaa#..",
".########aaaa#..",
".#aaaaa#aaaaa#..",
".#aaaaa#aaaa##..",
".#aaaaa#aaa##...",
".#aaaaa#aa##....",
".#aaaaa#a##.....",
".#aaaaa###......",
".########.......",
"................"};
"""
MenuText = "Cam design"
ToolTip = "Cam"
def Initialize(self):
import CamGui
import Cam
def GetClassName(self):
return "CamGui::Workbench"
# Register the Cam workbench with the GUI
Gui.addWorkbench(CamWorkbench())
|
lgpl-2.1
|
easyfmxu/zulip
|
zerver/templatetags/app_filters.py
|
125
|
1391
|
from django.template import Library
register = Library()
def and_n_others(values, limit):
# A helper for the commonly appended "and N other(s)" string, with
# the appropriate pluralization.
return " and %d other%s" % (len(values) - limit,
"" if len(values) == limit + 1 else "s")
@register.filter(name='display_list', is_safe=True)
def display_list(values, display_limit):
"""
Given a list of values, return a string nicely formatting those values,
summarizing when you have more than `display_limit`. Eg, for a
`display_limit` of 3 we get the following possible cases:
Jessica
Jessica and Waseem
    Jessica, Waseem and Tim
    Jessica, Waseem, Tim and 1 other
    Jessica, Waseem, Tim and 2 others
"""
if len(values) == 1:
# One value, show it.
display_string = "%s" % (values[0],)
elif len(values) <= display_limit:
        # No more than `display_limit` values; show all of them.
display_string = ", ".join(
"%s" % (value,) for value in values[:-1])
display_string += " and %s" % (values[-1],)
else:
# More than `display_limit` values, only mention a few.
display_string = ", ".join(
"%s" % (value,) for value in values[:display_limit])
display_string += and_n_others(values, display_limit)
return display_string
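# Typical template usage (the variable name here is hypothetical):
#   {% load app_filters %}
#   {{ user_names|display_list:3 }}
# renders e.g. "Jessica, Waseem, Tim and 2 others" for a five-name list.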
|
apache-2.0
|
hughperkins/kgsgo-dataset-preprocessor
|
thirdparty/future/src/libpasteurize/fixes/fix_unpacking.py
|
60
|
5954
|
u"""
Fixer for:
(a,)* *b (,c)* [,] = s
for (a,)* *b (,c)* [,] in d: ...
"""
from lib2to3 import fixer_base
from itertools import count
from lib2to3.fixer_util import (Assign, Comma, Call, Newline, Name,
Number, token, syms, Node, Leaf)
from libfuturize.fixer_util import indentation, suitify, commatize
# from libfuturize.fixer_util import Assign, Comma, Call, Newline, Name, Number, indentation, suitify, commatize, token, syms, Node, Leaf
def assignment_source(num_pre, num_post, LISTNAME, ITERNAME):
u"""
Accepts num_pre and num_post, which are counts of values
before and after the starg (not including the starg)
Returns a source fit for Assign() from fixer_util
"""
children = []
pre = unicode(num_pre)
post = unicode(num_post)
# This code builds the assignment source from lib2to3 tree primitives.
# It's not very readable, but it seems like the most correct way to do it.
if num_pre > 0:
pre_part = Node(syms.power, [Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Leaf(token.COLON, u":"), Number(pre)]), Leaf(token.RSQB, u"]")])])
children.append(pre_part)
children.append(Leaf(token.PLUS, u"+", prefix=u" "))
main_part = Node(syms.power, [Leaf(token.LSQB, u"[", prefix=u" "), Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Number(pre) if num_pre > 0 else Leaf(1, u""), Leaf(token.COLON, u":"), Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]) if num_post > 0 else Leaf(1, u"")]), Leaf(token.RSQB, u"]"), Leaf(token.RSQB, u"]")])])
children.append(main_part)
if num_post > 0:
children.append(Leaf(token.PLUS, u"+", prefix=u" "))
post_part = Node(syms.power, [Name(LISTNAME, prefix=u" "), Node(syms.trailer, [Leaf(token.LSQB, u"["), Node(syms.subscript, [Node(syms.factor, [Leaf(token.MINUS, u"-"), Number(post)]), Leaf(token.COLON, u":")]), Leaf(token.RSQB, u"]")])])
children.append(post_part)
source = Node(syms.arith_expr, children)
return source
class FixUnpacking(fixer_base.BaseFix):
PATTERN = u"""
expl=expr_stmt< testlist_star_expr<
pre=(any ',')*
star_expr< '*' name=NAME >
post=(',' any)* [','] > '=' source=any > |
impl=for_stmt< 'for' lst=exprlist<
pre=(any ',')*
star_expr< '*' name=NAME >
post=(',' any)* [','] > 'in' it=any ':' suite=any>"""
def fix_explicit_context(self, node, results):
pre, name, post, source = (results.get(n) for n in (u"pre", u"name", u"post", u"source"))
pre = [n.clone() for n in pre if n.type == token.NAME]
name.prefix = u" "
post = [n.clone() for n in post if n.type == token.NAME]
target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
# to make the special-case fix for "*z, = ..." correct with the least
# amount of modification, make the left-side into a guaranteed tuple
target.append(Comma())
source.prefix = u""
setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [source.clone()]))
power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
return setup_line, power_line
def fix_implicit_context(self, node, results):
u"""
Only example of the implicit context is
a for loop, so only fix that.
"""
pre, name, post, it = (results.get(n) for n in (u"pre", u"name", u"post", u"it"))
pre = [n.clone() for n in pre if n.type == token.NAME]
name.prefix = u" "
post = [n.clone() for n in post if n.type == token.NAME]
target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
# to make the special-case fix for "*z, = ..." correct with the least
# amount of modification, make the left-side into a guaranteed tuple
target.append(Comma())
source = it.clone()
source.prefix = u""
setup_line = Assign(Name(self.LISTNAME), Call(Name(u"list"), [Name(self.ITERNAME)]))
power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
return setup_line, power_line
def transform(self, node, results):
u"""
a,b,c,d,e,f,*g,h,i = range(100) changes to
_3to2list = list(range(100))
a,b,c,d,e,f,g,h,i, = _3to2list[:6] + [_3to2list[6:-2]] + _3to2list[-2:]
and
for a,b,*c,d,e in iter_of_iters: do_stuff changes to
for _3to2iter in iter_of_iters:
_3to2list = list(_3to2iter)
a,b,c,d,e, = _3to2list[:2] + [_3to2list[2:-2]] + _3to2list[-2:]
do_stuff
"""
self.LISTNAME = self.new_name(u"_3to2list")
self.ITERNAME = self.new_name(u"_3to2iter")
expl, impl = results.get(u"expl"), results.get(u"impl")
if expl is not None:
setup_line, power_line = self.fix_explicit_context(node, results)
setup_line.prefix = expl.prefix
power_line.prefix = indentation(expl.parent)
setup_line.append_child(Newline())
parent = node.parent
i = node.remove()
parent.insert_child(i, power_line)
parent.insert_child(i, setup_line)
elif impl is not None:
setup_line, power_line = self.fix_implicit_context(node, results)
suitify(node)
suite = [k for k in node.children if k.type == syms.suite][0]
setup_line.prefix = u""
power_line.prefix = suite.children[1].value
suite.children[2].prefix = indentation(suite.children[2])
suite.insert_child(2, Newline())
suite.insert_child(2, power_line)
suite.insert_child(2, Newline())
suite.insert_child(2, setup_line)
results.get(u"lst").replace(Name(self.ITERNAME, prefix=u" "))
|
mpl-2.0
|
bluemonk482/tdparse
|
src/sklearnSVM.py
|
1
|
3651
|
import os, time
from argparse import ArgumentParser
import numpy as np
from sklearn import svm, metrics
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit, GroupKFold
from sklearn.datasets import load_svmlight_file
from sklearn.externals import joblib
from utilities import readfeats, readfeats_sklearn, twoclass_fscore, frange, writingfile
# from liblinear import scaling
def macro_averaged_precision(y_true, y_predicted):
p = metrics.precision_score(y_true, y_predicted, average='macro')
return p
def predict(clf, x_train, y_train, x_test, y_test):
y_predicted = clf.predict(x_test)
print 'Macro-F1 score: ', metrics.f1_score(y_test, y_predicted, average='macro')
print 'Accuracy score: ', metrics.accuracy_score(y_test, y_predicted)
print "Macro-F1 score (2 classes):", (metrics.f1_score(y_test, y_predicted, average=None)[0]+metrics.f1_score(y_test, y_predicted, average=None)[-1])/2
return y_predicted
def CV(x_train, y_train):
c=[]
crange=frange(0.00001,1,10)
c.extend([i for i in crange])
crange=frange(0.00003,3,10)
c.extend([i for i in crange])
crange=frange(0.00005,5,10)
c.extend([i for i in crange])
crange=frange(0.00007,7,10)
c.extend([i for i in crange])
crange=frange(0.00009,10,10)
c.extend([i for i in crange])
c.sort() #Cost parameter values; use a bigger search space for better performance
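    # Assuming utilities.frange(start, stop, factor) steps geometrically by
    # the third argument, the sorted grid covers mantissas 1/3/5/7/9 across
    # several decades, from roughly 1e-5 up to 10.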
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0).split(x_train, y_train)
# ids = readfeats('../data/election/output/id_train') # only for election data
# cv = GroupKFold(n_splits=5).split(x_train, y_train, ids)
clf = svm.LinearSVC()
param_grid = [{'C': c}]
twoclass_f1_macro = metrics.make_scorer(twoclass_fscore, greater_is_better=True)
precision_macro = metrics.make_scorer(macro_averaged_precision, greater_is_better=True)
grid_search = GridSearchCV(clf, param_grid=param_grid, cv=cv, verbose=0, scoring='f1_macro')
grid_search.fit(x_train, y_train)
print("Best parameters set:")
print '\n'
print(grid_search.best_estimator_)
print '\n'
print(grid_search.best_score_)
print(grid_search.best_params_)
print '\n'
return grid_search.best_estimator_
def save_model(clf, filepath):
joblib.dump(clf, filepath)
def main(output_dir):
trfile = '../data/'+output_dir+'/train.scale'
tfile = '../data/'+output_dir+'/test.scale'
pfile = '../data/'+output_dir+'/predresults'
truefile = '../data/'+output_dir+'/y_test'
# print "scaling features"
# scaling(output_dir)
print "loading features for training"
x_train, y_train = readfeats_sklearn(trfile)
print "loading features for testing"
x_test, y_test = readfeats_sklearn(tfile)
print "cross-validation"
clf = CV(x_train, y_train) # Comment this if parameter tuning is not desired
# print "training classifier"
# clf = svm.LinearSVC(C=1, class_weight='balanced') # Manually select C-parameter for training SVM
# clf.fit(x_train, y_train)
# print "saving trained model"
# save_model(clf, '../models/sklearn_saved.model')
print "evaluation"
preds = predict(clf, x_train, y_train, x_test, y_test)
print "writing labels"
writingfile(pfile, preds)
if __name__ == "__main__":
start = time.clock()
parser = ArgumentParser()
parser.add_argument("--data", dest="d", help="Output folder name", default='election')
args = parser.parse_args()
output_dir = args.d + '/output'
main(output_dir)
print "\n"
print "Time taken:", time.clock() - start
|
mit
|
dpgoetz/sos
|
setup.py
|
1
|
1592
|
#!/usr/bin/python
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from sos import __version__ as version
name = 'sos'
setup(
name=name,
version=version,
description='Swift Origin Server',
license='Apache License (2.0)',
author='OpenStack, LLC.',
author_email='[email protected]',
url='https://github.com/dpgoetz/sos',
packages=find_packages(exclude=['test_sos', 'bin']),
test_suite='nose.collector',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Environment :: No Input/Output (Daemon)',
],
install_requires=[], # removed for better compat
scripts=[
'bin/swift-origin-prep',
'bin/origin-ref-migration',
],
entry_points={
'paste.filter_factory': [
'sos=sos.origin:filter_factory',
],
},
)
|
apache-2.0
|
Andrew-McNab-UK/DIRAC
|
tests/Integration/DataManagementSystem/Test_Client_FTS.py
|
2
|
8332
|
""" This is a test of the chain
FTSClient -> FTSManagerHandler -> FTSDB
It supposes that the DB is present, and that the service is running
"""
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import unittest
#import mock
import uuid
from DIRAC import gLogger
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSSite import FTSSite
# from DIRAC.DataManagementSystem.private.FTSHistoryView import FTSHistoryView
# # # SUT
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
class FTSDBTestCase( unittest.TestCase ):
"""
.. class:: FTSDBTests
"""
def setUp( self ):
""" test case set up """
gLogger.setLevel( 'NOTICE' )
self.ftsSites = [ FTSSite( ftsServer = 'https://fts22-t0-export.cern.ch:8443/glite-data-transfer-fts/services/FileTransfer', name = 'CERN.ch' ),
FTSSite( ftsServer = 'https://fts.pic.es:8443/glite-data-transfer-fts/services/FileTransfer', name = 'PIC.es' ),
FTSSite( ftsServer = 'https://lcgfts.gridpp.rl.ac.uk:8443/glite-data-transfer-fts/services/FileTransfer', name = 'RAL.uk' ),
]
self.ses = [ 'CERN-USER', 'RAL-USER' ]
self.statuses = [ 'Submitted', 'Finished', 'FinishedDirty', 'Active', 'Ready' ]
self.submitted = 0
self.numberOfJobs = 10
self.opIDs = []
self.ftsJobs = []
for i in xrange( self.numberOfJobs ):
opID = i % 3
if opID not in self.opIDs:
self.opIDs.append( opID )
ftsJob = FTSJob()
ftsJob.FTSGUID = str( uuid.uuid4() )
ftsJob.FTSServer = self.ftsSites[0].FTSServer
ftsJob.Status = self.statuses[ i % len( self.statuses ) ]
ftsJob.OperationID = opID
if ftsJob.Status in FTSJob.FINALSTATES:
ftsJob.Completeness = 100
if ftsJob.Status == 'Active':
ftsJob.Completeness = 90
ftsJob.SourceSE = self.ses[ i % len( self.ses ) ]
ftsJob.TargetSE = 'PIC-USER'
ftsJob.RequestID = 12345
ftsFile = FTSFile()
ftsFile.FileID = i + 1
ftsFile.OperationID = i + 1
ftsFile.LFN = '/a/b/c/%d' % i
ftsFile.Size = 1000000
ftsFile.OperationID = opID
ftsFile.SourceSE = ftsJob.SourceSE
ftsFile.TargetSE = ftsJob.TargetSE
ftsFile.SourceSURL = 'foo://source.bar.baz/%s' % ftsFile.LFN
ftsFile.TargetSURL = 'foo://target.bar.baz/%s' % ftsFile.LFN
ftsFile.Status = 'Waiting' if ftsJob.Status != 'FinishedDirty' else 'Failed'
ftsFile.RequestID = 12345
ftsFile.Checksum = 'addler'
ftsFile.ChecksumType = 'adler32'
ftsFile.FTSGUID = ftsJob.FTSGUID
if ftsJob.Status == 'FinishedDirty':
ftsJob.FailedFiles = 1
ftsJob.FailedSize = ftsFile.Size
ftsJob.addFile( ftsFile )
self.ftsJobs.append( ftsJob )
self.submitted = len( [ i for i in self.ftsJobs if i.Status == 'Submitted' ] )
self.ftsClient = FTSClient()
# self.ftsClient.replicaManager = mock.Mock()
# self.ftsClient.replicaManager.getActiveReplicas.return_value = {'OK': True,
# 'Value': {'Successful': {'/a/b/c/1':{'CERN-USER':'/aa/a/b/c/1d',
# 'RAL-USER':'/bb/a/b/c/1d'},
# '/a/b/c/2':{'CERN-USER':'/aa/a/b/c/2d',
# 'RAL-USER':'/bb/a/b/c/2d'},
# '/a/b/c/3':{'CERN-USER':'/aa/a/b/c/3d',
# 'RAL-USER':'/bb/a/b/c/3d'}
# },
# 'Failed': {'/a/b/c/4':'/aa/a/b/c/4d',
# '/a/b/c/5':'/aa/a/b/c/5d'}
# }
# }
def tearDown( self ):
""" clean up """
del self.ftsJobs
del self.ftsSites
class FTSClientChain( FTSDBTestCase ):
def test_addAndRemoveJobs( self ):
""" put, get, peek, delete jobs methods """
print 'putJob'
for ftsJob in self.ftsJobs:
put = self.ftsClient.putFTSJob( ftsJob )
self.assertEqual( put['OK'], True )
print 'getFTSJobIDs'
res = self.ftsClient.getFTSJobIDs( self.statuses )
self.assertEqual( res['OK'], True )
self.assertEqual( len( res['Value'] ), self.numberOfJobs )
FTSjobIDs = res['Value']
print 'getFTSJobList'
    res = self.ftsClient.getFTSJobList( self.statuses, self.numberOfJobs )
self.assertEqual( res['OK'], True )
self.assertEqual( len( res['Value'] ), self.numberOfJobs )
print 'peekJob'
for i in FTSjobIDs:
peek = self.ftsClient.peekFTSJob( i )
self.assertEqual( peek['OK'], True )
self.assertEqual( len( peek['Value']['FTSFiles'] ), 1 )
print 'getJob'
for i in FTSjobIDs:
get = self.ftsClient.getFTSJob( i )
self.assertEqual( get['OK'], True )
self.assertEqual( len( get['Value']['FTSFiles'] ), 1 )
print 'getFTSFileIDs'
res = self.ftsClient.getFTSFileIDs()
self.assertEqual( res['OK'], True )
FTSfileIDs = res['Value']
print 'getFTSFileList'
res = self.ftsClient.getFTSFileList()
self.assertEqual( res['OK'], True )
print 'peekFTSFile'
for i in FTSfileIDs:
peek = self.ftsClient.peekFTSFile( i )
self.assertEqual( peek['OK'], True )
print 'getFTSFile'
for i in FTSfileIDs:
res = self.ftsClient.getFTSFile( i )
self.assertEqual( res['OK'], True )
print 'deleteJob'
for i in FTSjobIDs:
delete = self.ftsClient.deleteFTSJob( i )
self.assertEqual( delete['OK'], True )
print 'deleteFiles'
for i in self.opIDs:
res = self.ftsClient.deleteFTSFiles( i )
self.assertTrue(res['OK'])
class FTSClientMix( FTSDBTestCase ):
def test_mix( self ):
""" all the other tests"""
opFileList = []
for ftsJob in self.ftsJobs:
self.ftsClient.putFTSJob( ftsJob )
opFileList.append( ( ftsJob[0].toJSON()["Value"], self.ses, self.ses ) )
# ftsSchedule can't work since the FTSStrategy object is refreshed in the service so it can't be mocked
# for opID in self.opIDs:
# res = self.ftsClient.ftsSchedule( 12345, opID, opFileList )
# self.assertTrue(res['OK'])
print 'setFTSFilesWaiting'
for operationID in self.opIDs:
for sourceSE in self.ses:
res = self.ftsClient.setFTSFilesWaiting( operationID, sourceSE )
self.assertEqual( res['OK'], True )
print 'getFTSHistory'
res = self.ftsClient.getFTSHistory()
self.assertEqual( res['OK'], True )
    self.assertTrue( isinstance( res['Value'], list ) )
print 'getFTSJobsForRequest'
res = self.ftsClient.getFTSJobsForRequest( 12345 )
self.assertEqual( res['OK'], True )
print 'getFTSFilesForRequest'
res = self.ftsClient.getFTSFilesForRequest( 12345 )
self.assertEqual( res['OK'], True )
print 'getDBSummary'
res = self.ftsClient.getDBSummary()
self.assertEqual( res['OK'], True )
FTSjobIDs = self.ftsClient.getFTSJobIDs( self.statuses )['Value']
print 'deleteJob'
for i in FTSjobIDs:
delete = self.ftsClient.deleteFTSJob( i )
self.assertEqual( delete['OK'], True )
print 'deleteFiles'
for i in self.opIDs:
res = self.ftsClient.deleteFTSFiles( i )
self.assertTrue(res['OK'])
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( FTSDBTestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( FTSClientChain ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( FTSClientMix ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
gpl-3.0
|
dcroc16/skunk_works
|
google_appengine/lib/PyAMF-0.6.1/pyamf/remoting/gateway/wsgi.py
|
39
|
6037
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
WSGI server implementation.
The Python Web Server Gateway Interface (WSGI) is a simple and universal
interface between web servers and web applications or frameworks.
The WSGI interface has two sides: the "server" or "gateway" side, and the
"application" or "framework" side. The server side invokes a callable
object (usually a function or a method) that is provided by the application
side. Additionally WSGI provides middlewares; a WSGI middleware implements
both sides of the API, so that it can be inserted "between" a WSGI server
and a WSGI application -- the middleware will act as an application from
the server's point of view, and as a server from the application's point
of view.
@see: U{WSGI homepage (external)<http://wsgi.org>}
@see: U{PEP-333 (external)<http://www.python.org/peps/pep-0333.html>}
@since: 0.1.0
"""
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway
__all__ = ['WSGIGateway']
class WSGIGateway(gateway.BaseGateway):
"""
WSGI Remoting Gateway.
"""
def getResponse(self, request, environ):
"""
Processes the AMF request, returning an AMF response.
@param request: The AMF Request.
@type request: L{Envelope<pyamf.remoting.Envelope>}
@rtype: L{Envelope<pyamf.remoting.Envelope>}
@return: The AMF Response.
"""
response = remoting.Envelope(request.amfVersion)
for name, message in request:
processor = self.getProcessor(message)
environ['pyamf.request'] = message
response[name] = processor(message, http_request=environ)
return response
def badRequestMethod(self, environ, start_response):
"""
Return HTTP 400 Bad Request.
"""
response = "400 Bad Request\n\nTo access this PyAMF gateway you " \
"must use POST requests (%s received)" % environ['REQUEST_METHOD']
start_response('400 Bad Request', [
('Content-Type', 'text/plain'),
('Content-Length', str(len(response))),
('Server', gateway.SERVER_NAME),
])
return [response]
def __call__(self, environ, start_response):
"""
@rtype: C{StringIO}
@return: File-like object.
"""
if environ['REQUEST_METHOD'] != 'POST':
return self.badRequestMethod(environ, start_response)
body = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
stream = None
timezone_offset = self._get_timezone_offset()
# Decode the request
try:
request = remoting.decode(body, strict=self.strict,
logger=self.logger, timezone_offset=timezone_offset)
except (pyamf.DecodeError, IOError):
if self.logger:
self.logger.exception('Error decoding AMF request')
response = "400 Bad Request\n\nThe request body was unable to " \
"be successfully decoded."
if self.debug:
response += "\n\nTraceback:\n\n%s" % gateway.format_exception()
start_response('400 Bad Request', [
('Content-Type', 'text/plain'),
('Content-Length', str(len(response))),
('Server', gateway.SERVER_NAME),
])
return [response]
except (KeyboardInterrupt, SystemExit):
raise
except:
if self.logger:
self.logger.exception('Unexpected error decoding AMF request')
response = ("500 Internal Server Error\n\nAn unexpected error "
"occurred whilst decoding.")
if self.debug:
response += "\n\nTraceback:\n\n%s" % gateway.format_exception()
start_response('500 Internal Server Error', [
('Content-Type', 'text/plain'),
('Content-Length', str(len(response))),
('Server', gateway.SERVER_NAME),
])
return [response]
if self.logger:
self.logger.debug("AMF Request: %r" % request)
# Process the request
try:
response = self.getResponse(request, environ)
except (KeyboardInterrupt, SystemExit):
raise
except:
if self.logger:
self.logger.exception('Error processing AMF request')
response = ("500 Internal Server Error\n\nThe request was "
"unable to be successfully processed.")
if self.debug:
response += "\n\nTraceback:\n\n%s" % gateway.format_exception()
start_response('500 Internal Server Error', [
('Content-Type', 'text/plain'),
('Content-Length', str(len(response))),
('Server', gateway.SERVER_NAME),
])
return [response]
if self.logger:
self.logger.debug("AMF Response: %r" % response)
# Encode the response
try:
stream = remoting.encode(response, strict=self.strict,
timezone_offset=timezone_offset)
except:
if self.logger:
                self.logger.exception('Error encoding AMF response')
            response = ("500 Internal Server Error\n\nThe response was "
                "unable to be encoded.")
if self.debug:
response += "\n\nTraceback:\n\n%s" % gateway.format_exception()
start_response('500 Internal Server Error', [
('Content-Type', 'text/plain'),
('Content-Length', str(len(response))),
('Server', gateway.SERVER_NAME),
])
return [response]
response = stream.getvalue()
start_response('200 OK', [
('Content-Type', remoting.CONTENT_TYPE),
('Content-Length', str(len(response))),
('Server', gateway.SERVER_NAME),
])
return [response]
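# A minimal sketch of serving this gateway (the 'echo' service mapping is
# hypothetical; wsgiref is the stdlib reference WSGI server):
#
#   from wsgiref.simple_server import make_server
#   from pyamf.remoting.gateway.wsgi import WSGIGateway
#
#   gw = WSGIGateway({'echo': lambda data: data})
#   make_server('localhost', 8000, gw).serve_forever()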
|
mit
|
jfozard/pyvol
|
pyvol/renderer.py
|
1
|
24178
|
import sys
import os.path
import numpy as np
import numpy.linalg as la
import math
import warnings
# Suppress UserWarning from external.transform module
warnings.filterwarnings("ignore", module="external.transform", lineno=1833)
from mesh.GLmesh import GLMesh
import OpenGL.GL
from OpenGL.GL import (
GL_ELEMENT_ARRAY_BUFFER,
GL_TEXTURE_2D,
GL_TEXTURE_3D,
GL_TEXTURE0,
GL_TEXTURE_MIN_FILTER,
GL_TEXTURE_MAG_FILTER,
GL_TEXTURE_WRAP_S,
GL_TEXTURE_WRAP_T,
GL_TEXTURE_WRAP_R,
GL_CLAMP_TO_EDGE,
GL_LINEAR,
GL_UNPACK_ALIGNMENT,
GL_RED,
GL_COLOR_BUFFER_BIT,
GL_DEPTH_TEST,
GL_DEPTH_BUFFER_BIT,
GL_RGBA,
GL_RGBA16F,
GL_CULL_FACE,
GL_BACK,
GL_FRONT,
GL_TRIANGLES,
GL_UNSIGNED_INT,
GL_UNSIGNED_BYTE,
GL_FLOAT,
glGenTextures,
glTexImage2D,
glTexImage3D,
glTexSubImage3D,
glTexParameter,
glActiveTexture,
glBindTexture,
glPixelStorei,
glViewport,
glClear,
glClearColor,
glDeleteTextures,
glFramebufferTexture2D,
glEnable,
glDisable,
glCullFace,
glDrawElements,
glPolygonMode,
GL_FRONT_AND_BACK,
GL_FILL,
GL_LINE,
)
import OpenGL.GLUT
from OpenGL.GL.shaders import (
glUseProgram,
glVertexAttribPointer,
glEnableVertexAttribArray,
glUniform1i,
glUniform1f,
glUniformMatrix4fv,
)
from OpenGL.GL.framebufferobjects import (
GL_FRAMEBUFFER,
GL_FRAMEBUFFER_EXT,
GL_COLOR_ATTACHMENT0_EXT,
glGenFramebuffers,
glBindFramebuffer,
)
from OpenGL.arrays.vbo import VBO
from OpenGL.GL.ARB.vertex_array_object import (
glGenVertexArrays,
glBindVertexArray,
)
# The above does not work on MacOSX, so overwrite
if sys.platform == "darwin":
from OpenGL.GL.APPLE.vertex_array_object import glGenVertexArraysAPPLE \
as glGenVertexArrays
from OpenGL.GL.APPLE.vertex_array_object import glBindVertexArrayAPPLE \
as glBindVertexArray
from OpenGL.GL.ARB.texture_rg import (
GL_R8,
)
from shaders.program import ShaderProgram, compile_vertex_shader_from_source, \
compile_fragment_shader_from_source
from external.transformations import Arcball, translation_matrix, scale_matrix
HERE = os.path.dirname(os.path.realpath(__file__))
SHADER_SOURCE_DIR = os.path.join(HERE, "shaders")
def perspective(fovy, aspect, zNear, zFar):
f = 1.0/math.tan(fovy/2.0/180*math.pi)
c1 = (zFar+zNear)/(zNear-zFar)
c2 = 2*zFar*zNear/(zNear-zFar)
return np.array(((f/aspect, 0, 0, 0),
(0, f, 0, 0),
(0, 0, c1, c2),
(0, 0, -1, 0)))
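# For instance, BaseGlutWindow._reshape below builds its projection with
#   perspective(40.0, float(width)/height, 0.1, 10000.0)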
class StackObject(object):
def __init__(self, stack, spacing):
s = np.array(stack, dtype=np.uint8, order='F')
w, h, d = s.shape
stack_texture = glGenTextures(1)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_3D, stack_texture)
glTexParameter(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameter(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
glTexParameter(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameter(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexParameter(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE)
glTexImage3D(GL_TEXTURE_3D, 0, GL_R8, d, h, w, 0, GL_RED,
GL_UNSIGNED_BYTE, s)
self.stack_texture = stack_texture
self.shape = s.shape
def update_stack(self, stack):
s = np.array(stack, dtype=np.uint8, order='F')
w, h, d = s.shape
glActiveTexture(GL_TEXTURE0)
glTexSubImage3D(GL_TEXTURE_3D, # Target
0, # Level
0, 0, 0, # xoffset, yoffset, zoffset
d, h, w, # width, height, depth
GL_RED, # Format of the pixel data
GL_UNSIGNED_BYTE, # Type of the pixel data
s) # Data
class VolumeObject(object):
def __init__(self, stack, spacing):
self.active = True
self.stack_object = StackObject(stack, spacing)
shape = self.stack_object.shape
self.vao = glGenVertexArrays(1)
glBindVertexArray(self.vao)
tl = np.array((shape[2]*spacing[0], # x
shape[1]*spacing[1], # y
shape[0]*spacing[2])) # z
# Vertex buffer: corners of cube.
# x, y, z, texture_x, texture_y, texture_z
vb = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # Corner 0.
[tl[0], 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, tl[1], 0.0, 0.0, 1.0, 0.0],
[tl[0], tl[1], 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, tl[2], 0.0, 0.0, 1.0],
[tl[0], 0.0, tl[2], 1.0, 0.0, 1.0],
[0.0, tl[1], tl[2], 0.0, 1.0, 1.0],
[tl[0], tl[1], tl[2], 1.0, 1.0, 1.0]] # Corner 7.
vb = np.array(vb, dtype=np.float32)
vb = vb.flatten()
# Triangles of cube.
idx_out = np.array([[0, 2, 1], [2, 3, 1], # Triangle 0, triangle 1.
[1, 4, 0], [1, 5, 4],
[3, 5, 1], [3, 7, 5],
[2, 7, 3], [2, 6, 7],
[0, 6, 2], [0, 4, 6],
[5, 6, 4], [5, 7, 6]], # Triangle 10, triangle 11.
dtype=np.uint32)
self.vtVBO = VBO(vb)
self.vtVBO.bind()
sc = 1.0/la.norm(tl)
c = 0.5*tl
self.transform = np.array(((sc, 0.0, 0.0, -sc*c[0]),
(0.0, sc, 0.0, -sc*c[1]),
(0.0, 0.0, sc, -sc*c[2]),
(0.0, 0.0, 0.0, 1.0)))
self.tex_transform = np.array(((1.0/tl[0], 0.0, 0.0, 0.0),
(0.0, 1.0/tl[1], 0.0, 0.0),
(0.0, 0.0, 1.0/tl[2], 0.0),
(0.0, 0.0, 0.0, 1.0)))
glBindVertexArray(0)
self.elVBO = VBO(idx_out, target=GL_ELEMENT_ARRAY_BUFFER)
self.elCount = len(idx_out.flatten())
def update_stack(self, stack):
self.stack_object.update_stack(stack)
class MeshObject(object):
def __init__(self, fn, spacing):
self.active = True
m = GLMesh()
self.mesh = m
sc = m.load_ply(fn)
v_out, n_out, col_out, idx_out = m.generate_arrays()
vb = np.concatenate((v_out, n_out, col_out), axis=1)
self.elVBO = VBO(idx_out, target=GL_ELEMENT_ARRAY_BUFFER)
self.elCount = len(idx_out.flatten())
self.vao = glGenVertexArrays(1)
glBindVertexArray(self.vao)
self.vtVBO = VBO(vb)
self.vtVBO.bind()
glBindVertexArray(0)
c = np.array((0, 0, 0))
self.transform = np.array(((sc, 0.0, 0.0, -sc*c[0]),
(0.0, sc, 0.0, -sc*c[1]),
(0.0, 0.0, sc, -sc*c[2]),
(0.0, 0.0, 0.0, 1.0)))
class IsosurfaceVolumeRenderer(object):
def __init__(self):
self.bfTex = None
self.fbo = None
self.volume_objects = []
self._make_volume_shaders()
def _make_volume_shaders(self):
vertex = compile_vertex_shader_from_source("volume.vert")
front_fragment = compile_fragment_shader_from_source("volume_iso_front.frag")
back_fragment = compile_fragment_shader_from_source("volume_back.frag")
self.b_shader = ShaderProgram(vertex, back_fragment)
self.f_shader = ShaderProgram(vertex, front_fragment)
self.volume_stride = 6 * 4
def _render_volume_obj(self, volume_object, width, height, VMatrix, PMatrix):
glBindFramebuffer(GL_FRAMEBUFFER, self.fbo)
glViewport(0, 0, width, height)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_3D, volume_object.stack_object.stack_texture)
glClear(GL_COLOR_BUFFER_BIT) # Clear back buffer.
glEnable(GL_CULL_FACE)
glCullFace(GL_BACK)
glUseProgram(self.b_shader.program)
glBindVertexArray(volume_object.vao)
volume_object.elVBO.bind()
mv_matrix = np.dot(VMatrix, volume_object.transform)
glUniformMatrix4fv(self.b_shader.get_uniform("mv_matrix"),
1, True, mv_matrix.astype('float32'))
glUniformMatrix4fv(self.b_shader.get_uniform("p_matrix"),
1, True, PMatrix.astype('float32'))
glDrawElements(GL_TRIANGLES, volume_object.elCount,
GL_UNSIGNED_INT, volume_object.elVBO)
volume_object.elVBO.unbind()
glBindVertexArray(0)
glUseProgram(0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glActiveTexture(GL_TEXTURE0 + 1)
glBindTexture(GL_TEXTURE_2D, self.bfTex)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_3D, volume_object.stack_object.stack_texture)
glUseProgram(self.f_shader.program)
glUniform1i(self.f_shader.get_uniform("texture3s"), 0)
glUniform1i(self.f_shader.get_uniform("backfaceTex"), 1)
tex_inv_matrix = np.dot(PMatrix,
np.dot(mv_matrix,
la.inv(volume_object.tex_transform)))
glUniformMatrix4fv(self.f_shader.get_uniform('tex_inv_matrix'),
1,
True,
tex_inv_matrix.astype('float32'))
glUniform1f(self.f_shader.get_uniform('isolevel'),
volume_object.threshold/255.0)
glEnable(GL_CULL_FACE)
glCullFace(GL_FRONT)
glBindVertexArray(volume_object.vao)
volume_object.elVBO.bind()
glUniformMatrix4fv(self.f_shader.get_uniform("mv_matrix"),
1, True, mv_matrix.astype('float32'))
glUniformMatrix4fv(self.f_shader.get_uniform("p_matrix"),
1, True, PMatrix.astype('float32'))
glDrawElements(GL_TRIANGLES, volume_object.elCount,
GL_UNSIGNED_INT, volume_object.elVBO)
glActiveTexture(GL_TEXTURE0+1)
glBindTexture(GL_TEXTURE_2D, 0)
glCullFace(GL_BACK)
volume_object.elVBO.unbind()
glBindVertexArray(0)
glUseProgram(0)
def render(self, width, height, VMatrix, PMatrix):
for volume_object in self.volume_objects:
if volume_object.active:
self._render_volume_obj(volume_object, width, height, VMatrix, PMatrix)
    def make_volume_obj(self, stack, spacing):
        volume_object = VolumeObject(stack, spacing)
volume_object.threshold = 15
glBindVertexArray(volume_object.vao)
glEnableVertexAttribArray(self.b_shader.get_attrib("position"))
glVertexAttribPointer(self.b_shader.get_attrib("position"),
3,
GL_FLOAT,
False,
self.volume_stride,
volume_object.vtVBO)
glEnableVertexAttribArray(self.b_shader.get_attrib("texcoord"))
glVertexAttribPointer(
self.b_shader.get_attrib("texcoord"),
3, GL_FLOAT, False, self.volume_stride, volume_object.vtVBO+12
)
glBindVertexArray(0)
self.volume_objects.append(volume_object)
def init_back_texture(self, width, height):
if self.fbo is None:
self.fbo = glGenFramebuffers(1)
glActiveTexture(GL_TEXTURE0 + 1)
if self.bfTex is not None:
glDeleteTextures([self.bfTex])
self.bfTex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.bfTex)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
w = int(width)
h = int(height)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, w, h, 0,
GL_RGBA, GL_FLOAT, None)
glBindFramebuffer(GL_FRAMEBUFFER, self.fbo)
glFramebufferTexture2D(GL_FRAMEBUFFER_EXT,
GL_COLOR_ATTACHMENT0_EXT,
GL_TEXTURE_2D,
self.bfTex, 0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glBindTexture(GL_TEXTURE_2D, 0)
class SolidRenderer(object):
def __init__(self):
self.solid_objects = []
self._make_solid_shaders()
def _make_solid_shaders(self):
vertex = compile_vertex_shader_from_source("solid_surface.vert")
fragment = compile_fragment_shader_from_source("solid_surface.frag")
self.shader = ShaderProgram(vertex, fragment)
self.stride = 9 * 4
def _render_solid_obj(self, solid_object, width, height, VMatrix, PMatrix):
glEnable(GL_DEPTH_TEST)
glUseProgram(self.shader.program)
glBindVertexArray(solid_object.vao)
solid_object.elVBO.bind()
mv_matrix = np.dot(VMatrix, solid_object.transform)
glUniformMatrix4fv(self.shader.get_uniform("mv_matrix"),
1, True, mv_matrix.astype('float32'))
glUniformMatrix4fv(self.shader.get_uniform("p_matrix"),
1, True, PMatrix.astype('float32'))
glDisable(GL_CULL_FACE)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
# glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glDrawElements(GL_TRIANGLES, solid_object.elCount,
GL_UNSIGNED_INT, solid_object.elVBO)
solid_object.elVBO.unbind()
glBindVertexArray(0)
glUseProgram(0)
def render(self, width, height, VMatrix, PMatrix):
for solid_object in self.solid_objects:
if solid_object.active:
self._render_solid_obj(solid_object, width, height, VMatrix, PMatrix)
def make_solid_obj(self, fn, spacing):
mesh_object = MeshObject(fn, spacing)
glBindVertexArray(mesh_object.vao)
glEnableVertexAttribArray(self.shader.get_attrib("position"))
glVertexAttribPointer(self.shader.get_attrib("position"),
3,
GL_FLOAT,
False,
self.stride,
mesh_object.vtVBO)
glEnableVertexAttribArray(self.shader.get_attrib("normal"))
glVertexAttribPointer(
self.shader.get_attrib("normal"),
3, GL_FLOAT, False, self.stride, mesh_object.vtVBO+12
)
glEnableVertexAttribArray(self.shader.get_attrib("color"))
glVertexAttribPointer(
self.shader.get_attrib("color"),
3, GL_FLOAT, False, self.stride, mesh_object.vtVBO+24
)
glBindVertexArray(0)
self.solid_objects.append(mesh_object)
class VolumeRenderer(object):
def __init__(self):
self.bfTex = None
self.fbo = None
self.volume_objects = []
self._make_volume_shaders()
def _make_volume_shaders(self):
vertex = compile_vertex_shader_from_source("volume.vert")
front_fragment = compile_fragment_shader_from_source("volume_front.frag")
back_fragment = compile_fragment_shader_from_source("volume_back.frag")
self.b_shader = ShaderProgram(vertex, back_fragment)
self.f_shader = ShaderProgram(vertex, front_fragment)
self.volume_stride = 6 * 4
def _render_volume_obj(self, volume_object, width, height, VMatrix, PMatrix):
glBindFramebuffer(GL_FRAMEBUFFER, self.fbo)
glViewport(0, 0, width, height)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_3D, volume_object.stack_object.stack_texture)
glClear(GL_COLOR_BUFFER_BIT) # Clear back buffer.
glEnable(GL_CULL_FACE)
glCullFace(GL_FRONT) # NB flipped
# glValidateProgram(self.b_shader.program)
# logging.debug("b_valid ", glGetProgramiv(self.b_shader.program,
# GL_VALIDATE_STATUS))
# logging.debug(glGetProgramInfoLog(self.b_shader.program).decode())
glUseProgram(self.b_shader.program)
glBindVertexArray(volume_object.vao)
volume_object.elVBO.bind()
mv_matrix = np.dot(VMatrix, volume_object.transform)
glUniformMatrix4fv(self.b_shader.get_uniform("mv_matrix"),
1, True, mv_matrix.astype('float32'))
glUniformMatrix4fv(self.b_shader.get_uniform("p_matrix"),
1, True, PMatrix.astype('float32'))
glDrawElements(GL_TRIANGLES, volume_object.elCount,
GL_UNSIGNED_INT, volume_object.elVBO)
volume_object.elVBO.unbind()
glBindVertexArray(0)
glUseProgram(0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glActiveTexture(GL_TEXTURE0 + 1)
glBindTexture(GL_TEXTURE_2D, self.bfTex)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_3D, volume_object.stack_object.stack_texture)
glUseProgram(self.f_shader.program)
glUniform1i(self.f_shader.get_uniform("texture3s"), 0)
glUniform1i(self.f_shader.get_uniform("backfaceTex"), 1)
glEnable(GL_CULL_FACE)
glCullFace(GL_BACK)
glBindVertexArray(volume_object.vao)
volume_object.elVBO.bind()
glUniformMatrix4fv(self.f_shader.get_uniform("mv_matrix"),
1, True, mv_matrix.astype('float32'))
glUniformMatrix4fv(self.f_shader.get_uniform("p_matrix"),
1, True, PMatrix.astype('float32'))
glDrawElements(GL_TRIANGLES, volume_object.elCount,
GL_UNSIGNED_INT, volume_object.elVBO)
glActiveTexture(GL_TEXTURE0+1)
glBindTexture(GL_TEXTURE_2D, 0)
glCullFace(GL_BACK)
volume_object.elVBO.unbind()
glBindVertexArray(0)
glUseProgram(0)
def render(self, width, height, VMatrix, PMatrix):
for volume_object in self.volume_objects:
if volume_object.active:
self._render_volume_obj(volume_object, width, height, VMatrix, PMatrix)
def make_volume_obj(self, stack, spacing):
volume_object = VolumeObject(stack, spacing)
glBindVertexArray(volume_object.vao)
glEnableVertexAttribArray(self.b_shader.get_attrib("position"))
glVertexAttribPointer(self.b_shader.get_attrib("position"),
3,
GL_FLOAT,
False,
self.volume_stride,
volume_object.vtVBO)
glEnableVertexAttribArray(self.b_shader.get_attrib("texcoord"))
glVertexAttribPointer(
self.b_shader.get_attrib("texcoord"),
3, GL_FLOAT, False, self.volume_stride, volume_object.vtVBO+12
)
glBindVertexArray(0)
self.volume_objects.append(volume_object)
def init_back_texture(self, width, height):
if self.fbo is None:
self.fbo = glGenFramebuffers(1)
glActiveTexture(GL_TEXTURE0 + 1)
if self.bfTex is not None:
glDeleteTextures([self.bfTex])
self.bfTex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.bfTex)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameter(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
w = int(width)
h = int(height)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, w, h, 0,
GL_RGBA, GL_FLOAT, None)
glBindFramebuffer(GL_FRAMEBUFFER, self.fbo)
glFramebufferTexture2D(GL_FRAMEBUFFER_EXT,
GL_COLOR_ATTACHMENT0_EXT,
GL_TEXTURE_2D,
self.bfTex, 0)
glBindFramebuffer(GL_FRAMEBUFFER, 0)
glBindTexture(GL_TEXTURE_2D, 0)
class CompositeRenderer(VolumeRenderer, SolidRenderer):
def __init__(self):
VolumeRenderer.__init__(self)
SolidRenderer.__init__(self)
def render(self, width, height, VMatrix, PMatrix):
for solid_object in self.solid_objects:
if solid_object.active:
self._render_solid_obj(solid_object, width, height, VMatrix, PMatrix)
for volume_object in self.volume_objects:
if volume_object.active:
self._render_volume_obj(volume_object, width, height, VMatrix, PMatrix)
class BaseWindow(object):
def __init__(self, title, width, height):
self.title = title
self.width = width
self.height = height
self.PMatrix = np.eye(4)
self.VMatrix = np.eye(4)
self.initialise_window()
def initialise_window(self):
"""Subclasses need to implement this."""
        raise NotImplementedError()
class BaseGlutWindow(BaseWindow):
def initialise_window(self):
OpenGL.GLUT.glutInit([])
OpenGL.GLUT.glutInitContextVersion(3, 2)
OpenGL.GLUT.glutInitWindowSize(self.width, self.height)
OpenGL.GLUT.glutInitDisplayMode(OpenGL.GLUT.GLUT_RGBA
| OpenGL.GLUT.GLUT_DEPTH
| OpenGL.GLUT.GLUT_DOUBLE)
        self.window = OpenGL.GLUT.glutCreateWindow(self.title)
self.moving = False
self.key_bindings = {"+": self.zoom_in,
"-": self.zoom_out,
"\x1b": self.exit}
self.ball = Arcball()
self.zoom = 0.5
self.dist = 2.0
def zoom_in(self, x=None, y=None):
self.zoom *= 1.1
def zoom_out(self, x=None, y=None):
self.zoom *= 0.9
def exit(self, x=None, y=None):
sys.exit(0)
def on_multi_button(self, bid, x, y, s):
pass
def on_multi_move(self, bid, x, y):
pass
def on_mouse_button(self, b, s, x, y):
self.moving = not s
self.ex, self.ey = x, y
self.ball.down([x, y])
def on_mouse_wheel(self, b, d, x, y):
self.dist += self.dist/15.0 * d
OpenGL.GLUT.glutPostRedisplay()
def on_mouse_move(self, x, y, z=0):
if self.moving:
self.ex, self.ey = x, y
self.ball.drag([x, y])
OpenGL.GLUT.glutPostRedisplay()
def start(self):
self._reshape(self.width, self.height)
OpenGL.GLUT.glutDisplayFunc(self._draw)
OpenGL.GLUT.glutReshapeFunc(self._reshape)
OpenGL.GLUT.glutKeyboardFunc(self.key)
OpenGL.GLUT.glutMouseFunc(self.on_mouse_button)
OpenGL.GLUT.glutMouseWheelFunc(self.on_mouse_button)
OpenGL.GLUT.glutMotionFunc(self.on_mouse_move)
OpenGL.GLUT.glutMainLoop()
def _reshape(self, width, height):
self.width = width
self.height = height
glViewport(0, 0, width, height)
self.PMatrix = perspective(40.0, float(width)/height, 0.1, 10000.0)
self.ball.place([width/2, height/2], height/2)
self.reshape_hook()
OpenGL.GLUT.glutPostRedisplay()
def key(self, k, x, y):
if k in self.key_bindings:
func = self.key_bindings[k]
func(x, y)
OpenGL.GLUT.glutPostRedisplay()
def _draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glClearColor(0.0, 0.0, 0.0, 1.0)
view_mat = translation_matrix((0, 0, -self.dist))
view_mat = view_mat.dot(self.ball.matrix())
view_mat = view_mat.dot(scale_matrix(self.zoom))
self.VMatrix = view_mat
self.draw_hook()
OpenGL.GLUT.glutSwapBuffers()
    def draw_hook(self):
        raise NotImplementedError()
    def reshape_hook(self):
        raise NotImplementedError()
|
mit
|
suyashphadtare/vestasi-update-erp
|
erpnext/stock/doctype/material_request/material_request.py
|
4
|
12308
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.buying_controller import BuyingController
form_grid_templates = {
"indent_details": "templates/form_grid/material_request_grid.html"
}
class MaterialRequest(BuyingController):
tname = 'Material Request Item'
fname = 'indent_details'
def check_if_already_pulled(self):
pass#if self.[d.sales_order_no for d in self.get('indent_details')]
def validate_qty_against_so(self):
so_items = {} # Format --> {'SO/00001': {'Item/001': 120, 'Item/002': 24}}
for d in self.get('indent_details'):
if d.sales_order_no:
if not so_items.has_key(d.sales_order_no):
so_items[d.sales_order_no] = {d.item_code: flt(d.qty)}
else:
if not so_items[d.sales_order_no].has_key(d.item_code):
so_items[d.sales_order_no][d.item_code] = flt(d.qty)
else:
so_items[d.sales_order_no][d.item_code] += flt(d.qty)
for so_no in so_items.keys():
for item in so_items[so_no].keys():
already_indented = frappe.db.sql("""select sum(ifnull(qty, 0))
from `tabMaterial Request Item`
where item_code = %s and sales_order_no = %s and
docstatus = 1 and parent != %s""", (item, so_no, self.name))
already_indented = already_indented and flt(already_indented[0][0]) or 0
actual_so_qty = frappe.db.sql("""select sum(ifnull(qty, 0)) from `tabSales Order Item`
where parent = %s and item_code = %s and docstatus = 1""", (so_no, item))
actual_so_qty = actual_so_qty and flt(actual_so_qty[0][0]) or 0
if actual_so_qty and (flt(so_items[so_no][item]) + already_indented > actual_so_qty):
frappe.throw(_("Material Request of maximum {0} can be made for Item {1} against Sales Order {2}").format(actual_so_qty - already_indented, item, so_no))
def validate_schedule_date(self):
for d in self.get('indent_details'):
if d.schedule_date and d.schedule_date < self.transaction_date:
frappe.throw(_("Expected Date cannot be before Material Request Date"))
# Validate
# ---------------------
def validate(self):
super(MaterialRequest, self).validate()
self.validate_schedule_date()
self.validate_uom_is_integer("uom", "qty")
if not self.status:
self.status = "Draft"
from erpnext.utilities import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped", "Cancelled"])
self.validate_value("material_request_type", "in", ["Purchase", "Transfer"])
pc_obj = frappe.get_doc('Purchase Common')
pc_obj.validate_for_items(self)
# self.validate_qty_against_so()
# NOTE: Since Item BOM and FG quantities are combined, using current data, it cannot be validated
# Though the creation of Material Request from a Production Plan can be rethought to fix this
def update_bin(self, is_submit, is_stopped):
""" Update Quantity Requested for Purchase in Bin for Material Request of type 'Purchase'"""
from erpnext.stock.utils import update_bin
for d in self.get('indent_details'):
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == "Yes":
if not d.warehouse:
frappe.throw(_("Warehouse required for stock Item {0}").format(d.item_code))
				qty = flt(d.qty)
if is_stopped:
qty = (d.qty > d.ordered_qty) and flt(flt(d.qty) - flt(d.ordered_qty)) or 0
args = {
"item_code": d.item_code,
"warehouse": d.warehouse,
"indented_qty": (is_submit and 1 or -1) * flt(qty),
"posting_date": self.transaction_date
}
update_bin(args)
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
self.update_bin(is_submit = 1, is_stopped = 0)
def check_modified_date(self):
mod_db = frappe.db.sql("""select modified from `tabMaterial Request` where name = %s""",
self.name)
date_diff = frappe.db.sql("""select TIMEDIFF('%s', '%s')"""
% (mod_db[0][0], cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(_(self.doctype), self.name))
def update_status(self, status):
self.check_modified_date()
self.update_bin(is_submit = (status == 'Submitted') and 1 or 0, is_stopped = 1)
frappe.db.set(self, 'status', cstr(status))
frappe.msgprint(_("Status updated to {0}").format(_(status)))
def on_cancel(self):
# Step 1:=> Get Purchase Common Obj
pc_obj = frappe.get_doc('Purchase Common')
# Step 2:=> Check for stopped status
pc_obj.check_for_stopped_status(self.doctype, self.name)
# Step 3:=> Check if Purchase Order has been submitted against current Material Request
pc_obj.check_docstatus(check = 'Next', doctype = 'Purchase Order', docname = self.name, detail_doctype = 'Purchase Order Item')
# Step 4:=> Update Bin
self.update_bin(is_submit = 0, is_stopped = (cstr(self.status) == 'Stopped') and 1 or 0)
# Step 5:=> Set Status
frappe.db.set(self,'status','Cancelled')
def update_completed_qty(self, mr_items=None):
if self.material_request_type != "Transfer":
return
item_doclist = self.get("indent_details")
if not mr_items:
mr_items = [d.name for d in item_doclist]
per_ordered = 0.0
for d in item_doclist:
if d.name in mr_items:
d.ordered_qty = flt(frappe.db.sql("""select sum(transfer_qty)
from `tabStock Entry Detail` where material_request = %s
and material_request_item = %s and docstatus = 1""",
(self.name, d.name))[0][0])
frappe.db.set_value(d.doctype, d.name, "ordered_qty", d.ordered_qty)
# note: if qty is 0, its row is still counted in len(item_doclist)
# hence adding 1 to per_ordered
if (d.ordered_qty > d.qty) or not d.qty:
per_ordered += 1.0
elif d.qty > 0:
per_ordered += flt(d.ordered_qty / flt(d.qty))
self.per_ordered = flt((per_ordered / flt(len(item_doclist))) * 100.0, 2)
frappe.db.set_value(self.doctype, self.name, "per_ordered", self.per_ordered)
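		# e.g. with two rows, one fully transferred (counts 1.0) and one half
		# done (counts 0.5): per_ordered = (1.0 + 0.5) / 2 * 100 = 75.0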
def update_completed_qty(doc, method):
if doc.doctype == "Stock Entry":
material_request_map = {}
for d in doc.get("mtn_details"):
if d.material_request:
material_request_map.setdefault(d.material_request, []).append(d.material_request_item)
for mr_name, mr_items in material_request_map.items():
mr_obj = frappe.get_doc("Material Request", mr_name)
if mr_obj.status in ["Stopped", "Cancelled"]:
frappe.throw(_("Material Request {0} is cancelled or stopped").format(mr_obj.name),
frappe.InvalidStatusError)
_update_requested_qty(doc, mr_obj, mr_items)
# update ordered percentage and qty
mr_obj.update_completed_qty(mr_items)
def _update_requested_qty(doc, mr_obj, mr_items):
"""update requested qty (before ordered_qty is updated)"""
from erpnext.stock.utils import update_bin
for mr_item_name in mr_items:
mr_item = mr_obj.get("indent_details", {"name": mr_item_name})
se_detail = doc.get("mtn_details", {"material_request": mr_obj.name,
"material_request_item": mr_item_name})
if mr_item and se_detail:
mr_item = mr_item[0]
se_detail = se_detail[0]
mr_item.ordered_qty = flt(mr_item.ordered_qty)
mr_item.qty = flt(mr_item.qty)
se_detail.transfer_qty = flt(se_detail.transfer_qty)
if se_detail.docstatus == 2 and mr_item.ordered_qty > mr_item.qty \
and se_detail.transfer_qty == mr_item.ordered_qty:
add_indented_qty = mr_item.qty
elif se_detail.docstatus == 1 and \
mr_item.ordered_qty + se_detail.transfer_qty > mr_item.qty:
add_indented_qty = mr_item.qty - mr_item.ordered_qty
else:
add_indented_qty = se_detail.transfer_qty
update_bin({
"item_code": se_detail.item_code,
"warehouse": se_detail.t_warehouse,
"indented_qty": (se_detail.docstatus==2 and 1 or -1) * add_indented_qty,
"posting_date": doc.posting_date,
})
def set_missing_values(source, target_doc):
target_doc.run_method("set_missing_values")
target_doc.run_method("calculate_taxes_and_totals")
def update_item(obj, target, source_parent):
target.conversion_factor = 1
target.qty = flt(obj.qty) - flt(obj.ordered_qty)
@frappe.whitelist()
def make_purchase_order(source_name, target_doc=None):
doclist = get_mapped_doc("Material Request", source_name, {
"Material Request": {
"doctype": "Purchase Order",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Purchase"]
}
},
"Material Request Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "prevdoc_detail_docname"],
["parent", "prevdoc_docname"],
["parenttype", "prevdoc_doctype"],
["uom", "stock_uom"],
["uom", "uom"]
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def make_purchase_order_based_on_supplier(source_name, target_doc=None):
if target_doc:
if isinstance(target_doc, basestring):
import json
target_doc = frappe.get_doc(json.loads(target_doc))
target_doc.set("po_details", [])
material_requests, supplier_items = get_material_requests_based_on_supplier(source_name)
def postprocess(source, target_doc):
target_doc.supplier = source_name
set_missing_values(source, target_doc)
target_doc.set("po_details", [d for d in target_doc.get("po_details")
if d.get("item_code") in supplier_items and d.get("qty") > 0])
return target_doc
for mr in material_requests:
target_doc = get_mapped_doc("Material Request", mr, {
"Material Request": {
"doctype": "Purchase Order",
},
"Material Request Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "prevdoc_detail_docname"],
["parent", "prevdoc_docname"],
["parenttype", "prevdoc_doctype"],
["uom", "stock_uom"],
["uom", "uom"]
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty
}
}, target_doc, postprocess)
return target_doc
def get_material_requests_based_on_supplier(supplier):
supplier_items = [d[0] for d in frappe.db.get_values("Item",
{"default_supplier": supplier})]
if supplier_items:
material_requests = frappe.db.sql_list("""select distinct mr.name
from `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
where mr.name = mr_item.parent
and mr_item.item_code in (%s)
and mr.material_request_type = 'Purchase'
and ifnull(mr.per_ordered, 0) < 99.99
and mr.docstatus = 1
and mr.status != 'Stopped'""" % ', '.join(['%s']*len(supplier_items)),
tuple(supplier_items))
else:
material_requests = []
return material_requests, supplier_items
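# Note on the query above: the IN (%s) clause is built by joining one '%s'
# placeholder per supplier item, e.g. ', '.join(['%s'] * 3) -> '%s, %s, %s',
# with the item codes passed as a tuple so the driver escapes them instead of
# them being string-formatted into the SQL.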
@frappe.whitelist()
def make_supplier_quotation(source_name, target_doc=None):
doclist = get_mapped_doc("Material Request", source_name, {
"Material Request": {
"doctype": "Supplier Quotation",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Purchase"]
}
},
"Material Request Item": {
"doctype": "Supplier Quotation Item",
"field_map": {
"name": "prevdoc_detail_docname",
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
}
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def make_stock_entry(source_name, target_doc=None):
def update_item(obj, target, source_parent):
target.conversion_factor = 1
target.qty = flt(obj.qty) - flt(obj.ordered_qty)
target.transfer_qty = flt(obj.qty) - flt(obj.ordered_qty)
def set_missing_values(source, target):
target.purpose = "Material Transfer"
target.run_method("get_stock_and_rate")
doclist = get_mapped_doc("Material Request", source_name, {
"Material Request": {
"doctype": "Stock Entry",
"validation": {
"docstatus": ["=", 1],
"material_request_type": ["=", "Transfer"]
}
},
"Material Request Item": {
"doctype": "Stock Entry Detail",
"field_map": {
"name": "material_request_item",
"parent": "material_request",
"uom": "stock_uom",
"warehouse": "t_warehouse"
},
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty
}
}, target_doc, set_missing_values)
return doclist
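# The field_map above copies values from each Material Request Item into the
# new Stock Entry Detail row: the item row's "name" becomes
# "material_request_item" and its "warehouse" becomes the target warehouse
# "t_warehouse", so the Stock Entry stays traceable back to the request.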
|
agpl-3.0
|
40223117cda/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/queue.py
|
818
|
8835
|
'''A multi-producer, multi-consumer queue.'''
try:
import threading
except ImportError:
import dummy_threading as threading
from collections import deque
from heapq import heappush, heappop
try:
from time import monotonic as time
except ImportError:
from time import time
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
'Exception raised by Queue.get(block=0)/get_nowait().'
pass
class Full(Exception):
'Exception raised by Queue.put(block=0)/put_nowait().'
pass
class Queue:
'''Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
'''
def __init__(self, maxsize=0):
self.maxsize = maxsize
self._init(maxsize)
# mutex must be held whenever the queue is mutating. All methods
# that acquire mutex must release it before returning. mutex
# is shared between the three conditions, so acquiring and
# releasing the conditions also acquires and releases mutex.
self.mutex = threading.Lock()
# Notify not_empty whenever an item is added to the queue; a
# thread waiting to get is notified then.
self.not_empty = threading.Condition(self.mutex)
# Notify not_full whenever an item is removed from the queue;
# a thread waiting to put is notified then.
self.not_full = threading.Condition(self.mutex)
# Notify all_tasks_done whenever the number of unfinished tasks
# drops to zero; a thread waiting in join() is notified to resume
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def task_done(self):
'''Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
'''
with self.all_tasks_done:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
self.unfinished_tasks = unfinished
def join(self):
'''Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
'''
with self.all_tasks_done:
while self.unfinished_tasks:
self.all_tasks_done.wait()
def qsize(self):
'''Return the approximate size of the queue (not reliable!).'''
with self.mutex:
return self._qsize()
def empty(self):
'''Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
'''
with self.mutex:
return not self._qsize()
def full(self):
'''Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= maxsize
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
'''
with self.mutex:
return 0 < self.maxsize <= self._qsize()
def put(self, item, block=True, timeout=None):
'''Put an item into the queue.
If optional arg 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Full exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception ('timeout'
is ignored in that case).
'''
with self.not_full:
if self.maxsize > 0:
if not block:
if self._qsize() >= self.maxsize:
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while self._qsize() >= self.maxsize:
remaining = endtime - time()
if remaining <= 0.0:
raise Full
self.not_full.wait(remaining)
self._put(item)
self.unfinished_tasks += 1
self.not_empty.notify()
def get(self, block=True, timeout=None):
'''Remove and return an item from the queue.
If optional arg 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
'''
with self.not_empty:
if not block:
if not self._qsize():
raise Empty
elif timeout is None:
while not self._qsize():
self.not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
endtime = time() + timeout
while not self._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise Empty
self.not_empty.wait(remaining)
item = self._get()
self.not_full.notify()
return item
def put_nowait(self, item):
'''Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the Full exception.
'''
return self.put(item, block=False)
def get_nowait(self):
'''Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the Empty exception.
'''
return self.get(block=False)
# Override these methods to implement other queue organizations
# (e.g. stack or priority queue).
# These will only be called with appropriate locks held
# Initialize the queue representation
def _init(self, maxsize):
self.queue = deque()
def _qsize(self):
return len(self.queue)
# Put a new item in the queue
def _put(self, item):
self.queue.append(item)
# Get an item from the queue
def _get(self):
return self.queue.popleft()
class PriorityQueue(Queue):
'''Variant of Queue that retrieves open entries in priority order (lowest first).
Entries are typically tuples of the form: (priority number, data).
'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
heappush(self.queue, item)
def _get(self):
return heappop(self.queue)
class LifoQueue(Queue):
'''Variant of Queue that retrieves most recently added entries first.'''
def _init(self, maxsize):
self.queue = []
def _qsize(self):
return len(self.queue)
def _put(self, item):
self.queue.append(item)
def _get(self):
return self.queue.pop()
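# Minimal usage sketch (illustrative; names with a leading underscore are
# arbitrary): one consumer thread drains the queue and acknowledges each item
# with task_done(), while the main thread produces and then join()s until
# every put() has been matched by a task_done().
if __name__ == '__main__':
    def _consume(q):
        while True:
            item = q.get()       # blocks until an item is available
            q.task_done()        # acknowledge this unit of work
            if item is None:     # sentinel value: stop consuming
                break

    _q = Queue(maxsize=4)
    _t = threading.Thread(target=_consume, args=(_q,))
    _t.start()
    for _i in range(10):
        _q.put(_i)               # blocks while more than 4 items are pending
    _q.put(None)                 # tell the consumer to exit
    _q.join()                    # returns once all 11 puts are acknowledged
    _t.join()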
|
gpl-3.0
|
cristiana214/cristianachavez214-cristianachavez
|
python/src/Lib/sgmllib.py
|
306
|
17884
|
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
from warnings import warnpy3k
warnpy3k("the sgmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
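# For example, a minimal subclass collecting the text inside <title> could
# define (illustrative sketch; the attribute names are arbitrary):
#     def start_title(self, attrs): self.in_title = 1
#     def end_title(self): self.in_title = 0
#     def handle_data(self, data):
#         if getattr(self, 'in_title', 0):
#             self.title = getattr(self, 'title', '') + data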
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
As an alternative to overriding this method, one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
|
apache-2.0
|
sho-h/ruby_env
|
devkit/mingw/bin/lib/encodings/utf_16.py
|
88
|
4110
|
""" Python 'utf-16' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_16_encode
def decode(input, errors='strict'):
return codecs.utf_16_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
codecs.IncrementalEncoder.__init__(self, errors)
self.encoder = None
def encode(self, input, final=False):
if self.encoder is None:
result = codecs.utf_16_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
return self.encoder(input, self.errors)[0]
def reset(self):
codecs.IncrementalEncoder.reset(self)
self.encoder = None
def getstate(self):
# state info we return to the caller:
# 0: stream is in natural order for this platform
# 2: endianness hasn't been determined yet
# (we're never writing in unnatural order)
return (2 if self.encoder is None else 0)
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def __init__(self, errors='strict'):
codecs.BufferedIncrementalDecoder.__init__(self, errors)
self.decoder = None
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_16_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_16_be_decode
elif consumed >= 2:
raise UnicodeError("UTF-16 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def reset(self):
codecs.BufferedIncrementalDecoder.reset(self)
self.decoder = None
class StreamWriter(codecs.StreamWriter):
def __init__(self, stream, errors='strict'):
codecs.StreamWriter.__init__(self, stream, errors)
self.encoder = None
def reset(self):
codecs.StreamWriter.reset(self)
self.encoder = None
def encode(self, input, errors='strict'):
if self.encoder is None:
result = codecs.utf_16_encode(input, errors)
if sys.byteorder == 'little':
self.encoder = codecs.utf_16_le_encode
else:
self.encoder = codecs.utf_16_be_encode
return result
else:
return self.encoder(input, errors)
class StreamReader(codecs.StreamReader):
def reset(self):
codecs.StreamReader.reset(self)
try:
del self.decode
except AttributeError:
pass
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_16_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_16_le_decode
elif byteorder == 1:
self.decode = codecs.utf_16_be_decode
elif consumed>=2:
raise UnicodeError,"UTF-16 stream does not start with BOM"
return (object, consumed)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
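# Illustrative round trip (hypothetical values): the incremental encoder emits
# a BOM on the first chunk only, and the incremental decoder infers the byte
# order from that BOM.
#   enc = IncrementalEncoder()
#   data = enc.encode(u'ab') + enc.encode(u'cd')   # BOM appears once, up front
#   dec = IncrementalDecoder()
#   assert dec.decode(data, final=True) == u'abcd'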
|
mit
|
kmike/splash
|
splash/lua_runner.py
|
3
|
5259
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import abc
import lupa
from splash.render_options import BadOption
from splash.utils import truncated
class ImmediateResult(object):
def __init__(self, value):
self.value = value
class AsyncCommand(object):
def __init__(self, id, name, kwargs):
self.id = id
self.name = name
self.kwargs = kwargs
class ScriptError(BadOption):
def enrich_from_lua_error(self, e):
if not isinstance(e, lupa.LuaError):
return
print("enrich_from_lua_error", self, e)
self_repr = repr(self.args[0])
if self_repr in e.args[0]:
self.args = (e.args[0],) + self.args[1:]
else:
self.args = (e.args[0] + "; " + self_repr,) + self.args[1:]
class BaseScriptRunner(object):
"""
A utility class for running Lua coroutines.
"""
__metaclass__ = abc.ABCMeta
_START_CMD = '__START__'
def __init__(self, lua, log, sandboxed):
"""
:param splash.lua_runtime.SplashLuaRuntime lua: Lua runtime wrapper
:param log: log function
:param bool sandboxed: True if the execution should use sandbox
"""
self.log = log
self.sandboxed = sandboxed
self.lua = lua
self.coro = None
self.result = None
self._waiting_for_result_id = None
def start(self, coro_func, coro_args):
"""
Run the script.
:param callable coro_func: Lua coroutine to start
:param list coro_args: arguments to pass to coro_func
"""
self.coro = coro_func(*coro_args)
self.result = ''
self._waiting_for_result_id = self._START_CMD
self.dispatch(self._waiting_for_result_id)
@abc.abstractmethod
def on_result(self, result):
""" This method is called when the coroutine exits. """
pass
@abc.abstractmethod
def on_async_command(self, cmd):
""" This method is called when AsyncCommand instance is received. """
pass
def on_lua_error(self, lua_exception):
"""
This method is called when an exception happens in a Lua script.
It is called with a lupa.LuaError instance and can raise a custom
ScriptError.
"""
pass
def dispatch(self, cmd_id, *args):
""" Execute the script """
args = args or None
args_repr = truncated("{!r}".format(args), max_length=400, msg="...[long arguments truncated]")
self.log("[lua] dispatch cmd_id={}, args={}".format(cmd_id, args_repr))
self.log(
"[lua] arguments are for command %s, waiting for result of %s" % (cmd_id, self._waiting_for_result_id),
min_level=3,
)
if cmd_id != self._waiting_for_result_id:
self.log("[lua] skipping an out-of-order result {}".format(args_repr), min_level=1)
return
while True:
try:
args = args or None
# Got arguments from an async command; send them to coroutine
# and wait for the next async command.
self.log("[lua] send %s" % args_repr)
cmd = self.coro.send(args) # cmd is a next async command
args = None # don't re-send the same value
cmd_repr = truncated(repr(cmd), max_length=400, msg='...[long result truncated]')
self.log("[lua] got {}".format(cmd_repr))
self._print_instructions_used()
except StopIteration:
# "main" coroutine is stopped;
# previous result is a final result returned from "main"
self.log("[lua] returning result")
try:
res = self.lua.lua2python(self.result)
except ValueError as e:
# can't convert result to a Python object
raise ScriptError("'main' returned bad result. {!s}".format(e))
self._print_instructions_used()
self.on_result(res)
return
except lupa.LuaError as lua_ex:
# Lua script raised an error
self._print_instructions_used()
self.log("[lua] caught LuaError %r" % lua_ex)
self.on_lua_error(lua_ex) # this can also raise a ScriptError
# XXX: are Lua errors bad requests?
raise ScriptError("unhandled Lua error: {!s}".format(lua_ex))
if isinstance(cmd, AsyncCommand):
self.log("[lua] executing {!r}".format(cmd))
self._waiting_for_result_id = cmd.id
self.on_async_command(cmd)
return
elif isinstance(cmd, ImmediateResult):
self.log("[lua] got result {!r}".format(cmd))
args = cmd.value
continue
else:
self.log("[lua] got non-command")
if isinstance(cmd, tuple):
cmd = list(cmd)
self.result = cmd
def _print_instructions_used(self):
if self.sandboxed:
self.log("[lua] instructions used: %d" % self.lua.instruction_count())
|
bsd-3-clause
|
tylertian/Openstack
|
openstack F/swift/swift/container/updater.py
|
4
|
12579
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import signal
import sys
import time
from random import random, shuffle
from tempfile import mkstemp
from eventlet import spawn, patcher, Timeout
import swift.common.db
from swift.container.server import DATADIR
from swift.common.bufferedhttp import http_connect
from swift.common.db import ContainerBroker
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, TRUE_VALUES, dump_recon_cache
from swift.common.daemon import Daemon
from swift.common.http import is_success, HTTP_INTERNAL_SERVER_ERROR
class ContainerUpdater(Daemon):
"""Update container information in account listings."""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='container-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.account_ring = None
self.concurrency = int(conf.get('concurrency', 4))
self.slowdown = float(conf.get('slowdown', 0.01))
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.no_changes = 0
self.successes = 0
self.failures = 0
self.account_suppressions = {}
self.account_suppression_time = \
float(conf.get('account_suppression_time', 60))
self.new_account_suppressions = None
swift.common.db.DB_PREALLOCATION = \
conf.get('db_preallocation', 'f').lower() in TRUE_VALUES
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "container.recon")
def get_account_ring(self):
"""Get the account ring. Load it if it hasn't been yet."""
if not self.account_ring:
self.account_ring = Ring(self.swift_dir, ring_name='account')
return self.account_ring
def get_paths(self):
"""
Get paths to all of the partitions on each drive to be processed.
:returns: a list of paths
"""
paths = []
for device in os.listdir(self.devices):
dev_path = os.path.join(self.devices, device)
if self.mount_check and not os.path.ismount(dev_path):
self.logger.warn(_('%s is not mounted'), device)
continue
con_path = os.path.join(dev_path, DATADIR)
if not os.path.exists(con_path):
continue
for partition in os.listdir(con_path):
paths.append(os.path.join(con_path, partition))
shuffle(paths)
return paths
def _load_suppressions(self, filename):
try:
with open(filename, 'r') as tmpfile:
for line in tmpfile:
account, until = line.split()
until = float(until)
self.account_suppressions[account] = until
except Exception:
self.logger.exception(
_('ERROR loading suppressions from %s: ') % filename)
finally:
os.unlink(filename)
def run_forever(self, *args, **kwargs):
"""
Run the updater continuously.
"""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin container update sweep'))
begin = time.time()
now = time.time()
expired_suppressions = \
[a for a, u in self.account_suppressions.iteritems() if u < now]
for account in expired_suppressions:
del self.account_suppressions[account]
pid2filename = {}
# read from account ring to ensure it's fresh
self.get_account_ring().get_nodes('')
for path in self.get_paths():
while len(pid2filename) >= self.concurrency:
pid = os.wait()[0]
try:
self._load_suppressions(pid2filename[pid])
finally:
del pid2filename[pid]
fd, tmpfilename = mkstemp()
os.close(fd)
pid = os.fork()
if pid:
pid2filename[pid] = tmpfilename
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.no_changes = 0
self.successes = 0
self.failures = 0
self.new_account_suppressions = open(tmpfilename, 'w')
forkbegin = time.time()
self.container_sweep(path)
elapsed = time.time() - forkbegin
self.logger.debug(
_('Container update sweep of %(path)s completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s '
'failures, %(no_change)s with no changes'),
{'path': path, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures,
'no_change': self.no_changes})
sys.exit()
while pid2filename:
pid = os.wait()[0]
try:
self._load_suppressions(pid2filename[pid])
finally:
del pid2filename[pid]
elapsed = time.time() - begin
self.logger.info(_('Container update sweep completed: %.02fs'),
elapsed)
dump_recon_cache({'container_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Run the updater once.
"""
patcher.monkey_patch(all=False, socket=True)
self.logger.info(_('Begin container update single threaded sweep'))
begin = time.time()
self.no_changes = 0
self.successes = 0
self.failures = 0
for path in self.get_paths():
self.container_sweep(path)
elapsed = time.time() - begin
self.logger.info(_('Container update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures, '
'%(no_change)s with no changes'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures, 'no_change': self.no_changes})
dump_recon_cache({'container_updater_sweep': elapsed},
self.rcache, self.logger)
def container_sweep(self, path):
"""
Walk the path looking for container DBs and process them.
:param path: path to walk
"""
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith('.db'):
self.process_container(os.path.join(root, file))
time.sleep(self.slowdown)
def process_container(self, dbfile):
"""
Process a container, and update the information in the account.
:param dbfile: container DB to process
"""
start_time = time.time()
broker = ContainerBroker(dbfile, logger=self.logger)
info = broker.get_info()
# Don't send updates if the container was auto-created since it
# definitely doesn't have up-to-date statistics.
if float(info['put_timestamp']) <= 0:
return
if self.account_suppressions.get(info['account'], 0) > time.time():
return
if info['put_timestamp'] > info['reported_put_timestamp'] or \
info['delete_timestamp'] > info['reported_delete_timestamp'] \
or info['object_count'] != info['reported_object_count'] or \
info['bytes_used'] != info['reported_bytes_used']:
container = '/%s/%s' % (info['account'], info['container'])
part, nodes = self.get_account_ring().get_nodes(info['account'])
events = [spawn(self.container_report, node, part, container,
info['put_timestamp'], info['delete_timestamp'],
info['object_count'], info['bytes_used'])
for node in nodes]
successes = 0
failures = 0
for event in events:
if is_success(event.wait()):
successes += 1
else:
failures += 1
if successes > failures:
self.logger.increment('successes')
self.successes += 1
self.logger.debug(
_('Update report sent for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
broker.reported(info['put_timestamp'],
info['delete_timestamp'], info['object_count'],
info['bytes_used'])
else:
self.logger.increment('failures')
self.failures += 1
self.logger.debug(
_('Update report failed for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
self.account_suppressions[info['account']] = until = \
time.time() + self.account_suppression_time
if self.new_account_suppressions:
print >>self.new_account_suppressions, \
info['account'], until
# Only track timing data for attempted updates:
self.logger.timing_since('timing', start_time)
else:
self.logger.increment('no_changes')
self.no_changes += 1
def container_report(self, node, part, container, put_timestamp,
delete_timestamp, count, bytes):
"""
Report container info to an account server.
:param node: node dictionary from the account ring
:param part: partition the account is on
:param container: container name
:param put_timestamp: put timestamp
:param delete_timestamp: delete timestamp
:param count: object count in the container
:param bytes: bytes used in the container
"""
with ConnectionTimeout(self.conn_timeout):
try:
conn = http_connect(
node['ip'], node['port'], node['device'], part,
'PUT', container,
headers={'X-Put-Timestamp': put_timestamp,
'X-Delete-Timestamp': delete_timestamp,
'X-Object-Count': count,
'X-Bytes-Used': bytes,
'X-Account-Override-Deleted': 'yes'})
except (Exception, Timeout):
self.logger.exception(_('ERROR account update failed with '
'%(ip)s:%(port)s/%(device)s (will retry later): '), node)
return HTTP_INTERNAL_SERVER_ERROR
with Timeout(self.node_timeout):
try:
resp = conn.getresponse()
resp.read()
return resp.status
except (Exception, Timeout):
if self.logger.getEffectiveLevel() <= logging.DEBUG:
self.logger.exception(
_('Exception with %(ip)s:%(port)s/%(device)s'), node)
return HTTP_INTERNAL_SERVER_ERROR
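# Illustrative note on process_container() above (hypothetical statuses): the
# report is treated as delivered only when a majority of replicas succeed.
#   statuses = [204, 204, 500]                    # one replica failed
#   successes = sum(1 for s in statuses if 200 <= s < 300)
#   failures = len(statuses) - successes
#   report_delivered = successes > failures       # True here: 2 > 1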
|
apache-2.0
|
xisisu/RT-Xen
|
tools/python/xen/xm/cpupool-new.py
|
41
|
1702
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2009 Fujitsu Technology Solutions
#============================================================================
""" Create a new managed cpupool.
"""
import sys
from xen.xm.main import serverType, SERVER_XEN_API, server
from xen.xm.cpupool import parseCommandLine, err, help as help_options
from xen.util.sxputils import sxp2map
def help():
return help_options()
def main(argv):
try:
(opts, config) = parseCommandLine(argv)
except StandardError, ex:
err(str(ex))
if not opts:
return
if serverType == SERVER_XEN_API:
record = sxp2map(config)
if type(record.get('proposed_CPUs', [])) != list:
record['proposed_CPUs'] = [record['proposed_CPUs']]
server.xenapi.cpu_pool.create(record)
else:
server.xend.cpu_pool.new(config)
if __name__ == '__main__':
main(sys.argv)
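# Illustrative note (hypothetical value): sxp2map() returns a scalar for a
# single-valued field, so a pool with one proposed CPU yields something like
# {'proposed_CPUs': '3'}; the list check in main() normalises that to
# {'proposed_CPUs': ['3']} before calling server.xenapi.cpu_pool.create().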
|
gpl-2.0
|
chauhanhardik/populo_2
|
common/djangoapps/third_party_auth/models.py
|
7
|
19981
|
# -*- coding: utf-8 -*-
"""
Models used to implement SAML SSO support in third_party_auth
(including Shibboleth support)
"""
from config_models.models import ConfigurationModel, cache
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import json
import logging
from social.backends.base import BaseAuth
from social.backends.oauth import OAuthAuth
from social.backends.saml import SAMLAuth, SAMLIdentityProvider
from social.exceptions import SocialAuthBaseException
from social.utils import module_member
log = logging.getLogger(__name__)
# A dictionary of {name: class} entries for each python-social-auth backend available.
# Because this setting can specify arbitrary code to load and execute, it is set via
# normal Django settings only and cannot be changed at runtime:
def _load_backend_classes(base_class=BaseAuth):
""" Load the list of python-social-auth backend classes from Django settings """
for class_path in settings.AUTHENTICATION_BACKENDS:
auth_class = module_member(class_path)
if issubclass(auth_class, base_class):
yield auth_class
_PSA_BACKENDS = {backend_class.name: backend_class for backend_class in _load_backend_classes()}
_PSA_OAUTH2_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(OAuthAuth)]
_PSA_SAML_BACKENDS = [backend_class.name for backend_class in _load_backend_classes(SAMLAuth)]
def clean_json(value, of_type):
""" Simple helper method to parse and clean JSON """
if not value.strip():
return json.dumps(of_type())
try:
value_python = json.loads(value)
except ValueError as err:
raise ValidationError("Invalid JSON: {}".format(err.message))
if not isinstance(value_python, of_type):
raise ValidationError("Expected a JSON {}".format(of_type))
return json.dumps(value_python, indent=4)
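# Illustrative behaviour of clean_json(): clean_json('', dict) returns '{}';
# clean_json('{"a": 1}', dict) returns the same object re-serialised with
# indent=4; clean_json('[1]', dict) raises ValidationError because the parsed
# value is a list, not a dict.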
class AuthNotConfigured(SocialAuthBaseException):
""" Exception when SAMLProviderData or other required info is missing """
def __init__(self, provider_name):
super(AuthNotConfigured, self).__init__()
self.provider_name = provider_name
def __str__(self):
return _('Authentication with {} is currently unavailable.').format( # pylint: disable=no-member
self.provider_name
)
class ProviderConfig(ConfigurationModel):
"""
Abstract Base Class for configuring a third_party_auth provider
"""
icon_class = models.CharField(
max_length=50, default='fa-sign-in',
help_text=(
'The Font Awesome (or custom) icon class to use on the login button for this provider. '
'Examples: fa-google-plus, fa-facebook, fa-linkedin, fa-sign-in, fa-university'
),
)
name = models.CharField(max_length=50, blank=False, help_text="Name of this provider (shown to users)")
secondary = models.BooleanField(
default=False,
help_text=_(
'Secondary providers are displayed less prominently, '
'in a separate list of "Institution" login providers.'
),
)
skip_registration_form = models.BooleanField(
default=False,
help_text=_(
"If this option is enabled, users will not be asked to confirm their details "
"(name, email, etc.) during the registration process. Only select this option "
"for trusted providers that are known to provide accurate user information."
),
)
skip_email_verification = models.BooleanField(
default=False,
help_text=_(
"If this option is selected, users will not be required to confirm their "
"email, and their account will be activated immediately upon registration."
),
)
prefix = None # used for provider_id. Set to a string value in subclass
backend_name = None # Set to a field or fixed value in subclass
# "enabled" field is inherited from ConfigurationModel
class Meta(object): # pylint: disable=missing-docstring
abstract = True
@property
def provider_id(self):
""" Unique string key identifying this provider. Must be URL and css class friendly. """
assert self.prefix is not None
return "-".join((self.prefix, ) + tuple(getattr(self, field) for field in self.KEY_FIELDS))
@property
def backend_class(self):
""" Get the python-social-auth backend class used for this provider """
return _PSA_BACKENDS[self.backend_name]
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
return self.backend_name == social_auth.provider
@classmethod
def get_register_form_data(cls, pipeline_kwargs):
"""Gets dict of data to display on the register form.
common.djangoapps.student.views.register_user uses this to populate the
new account creation form with values supplied by the user's chosen
provider, preventing duplicate data entry.
Args:
pipeline_kwargs: dict of string -> object. Keyword arguments
accumulated by the pipeline thus far.
Returns:
Dict of string -> string. Keys are names of form fields; values are
values for that field. Where there is no value, the empty string
must be used.
"""
# Details about the user sent back from the provider.
details = pipeline_kwargs.get('details')
# Get the username separately to take advantage of the de-duping logic
# built into the pipeline. The provider cannot de-dupe because it can't
# check the state of taken usernames in our system. Note that there is
# technically a data race between the creation of this value and the
# creation of the user object, so it is still possible for users to get
# an error on submit.
suggested_username = pipeline_kwargs.get('username')
return {
'email': details.get('email', ''),
'name': details.get('fullname', ''),
'username': suggested_username,
}
def get_authentication_backend(self):
"""Gets associated Django settings.AUTHENTICATION_BACKEND string."""
return '{}.{}'.format(self.backend_class.__module__, self.backend_class.__name__)
class OAuth2ProviderConfig(ProviderConfig):
"""
Configuration Entry for an OAuth2 based provider.
Also works for OAuth1 providers.
"""
prefix = 'oa2'
KEY_FIELDS = ('backend_name', ) # Backend name is unique
backend_name = models.CharField(
max_length=50, choices=[(name, name) for name in _PSA_OAUTH2_BACKENDS], blank=False, db_index=True,
help_text=(
"Which python-social-auth OAuth2 provider backend to use. "
"The list of backend choices is determined by the THIRD_PARTY_AUTH_BACKENDS setting."
# To be precise, it's set by AUTHENTICATION_BACKENDS - which aws.py sets from THIRD_PARTY_AUTH_BACKENDS
)
)
key = models.TextField(blank=True, verbose_name="Client ID")
secret = models.TextField(
blank=True,
verbose_name="Client Secret",
help_text=(
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting '
'SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} '
'in your instance\'s Django settings (or lms.auth.json)'
)
)
other_settings = models.TextField(blank=True, help_text="Optional JSON object with advanced settings, if any.")
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "Provider Configuration (OAuth)"
verbose_name_plural = verbose_name
def clean(self):
""" Standardize and validate fields """
super(OAuth2ProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if name == "KEY":
return self.key
if name == "SECRET":
if self.secret:
return self.secret
# To allow instances to avoid storing secrets in the DB, the secret can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_OAUTH_SECRETS', {}).get(self.backend_name, '')
if self.other_settings:
other_settings = json.loads(self.other_settings)
assert isinstance(other_settings, dict), "other_settings should be a JSON object (dictionary)"
return other_settings[name]
raise KeyError
class SAMLProviderConfig(ProviderConfig):
"""
Configuration Entry for a SAML/Shibboleth provider.
"""
prefix = 'saml'
KEY_FIELDS = ('idp_slug', )
backend_name = models.CharField(
max_length=50, default='tpa-saml', choices=[(name, name) for name in _PSA_SAML_BACKENDS], blank=False,
help_text="Which python-social-auth provider backend to use. 'tpa-saml' is the standard edX SAML backend.")
idp_slug = models.SlugField(
max_length=30, db_index=True,
help_text=(
'A short string uniquely identifying this provider. '
'Cannot contain spaces and should be usable as a CSS class. Examples: "ubc", "mit-staging"'
))
entity_id = models.CharField(
max_length=255, verbose_name="Entity ID", help_text="Example: https://idp.testshib.org/idp/shibboleth")
metadata_source = models.CharField(
max_length=255,
help_text=(
"URL to this provider's XML metadata. Should be an HTTPS URL. "
"Example: https://www.testshib.org/metadata/testshib-providers.xml"
))
attr_user_permanent_id = models.CharField(
max_length=128, blank=True, verbose_name="User ID Attribute",
help_text="URN of the SAML attribute that we can use as a unique, persistent user ID. Leave blank for default.")
attr_full_name = models.CharField(
max_length=128, blank=True, verbose_name="Full Name Attribute",
help_text="URN of SAML attribute containing the user's full name. Leave blank for default.")
attr_first_name = models.CharField(
max_length=128, blank=True, verbose_name="First Name Attribute",
help_text="URN of SAML attribute containing the user's first name. Leave blank for default.")
attr_last_name = models.CharField(
max_length=128, blank=True, verbose_name="Last Name Attribute",
help_text="URN of SAML attribute containing the user's last name. Leave blank for default.")
attr_username = models.CharField(
max_length=128, blank=True, verbose_name="Username Hint Attribute",
help_text="URN of SAML attribute to use as a suggested username for this user. Leave blank for default.")
attr_email = models.CharField(
max_length=128, blank=True, verbose_name="Email Attribute",
help_text="URN of SAML attribute containing the user's email address[es]. Leave blank for default.")
other_settings = models.TextField(
verbose_name="Advanced settings", blank=True,
help_text=(
'For advanced use cases, enter a JSON object with additional configuration. '
'The tpa-saml backend supports only {"requiredEntitlements": ["urn:..."]} '
'which can be used to require the presence of a specific eduPersonEntitlement.'
))
def clean(self):
""" Standardize and validate fields """
super(SAMLProviderConfig, self).clean()
self.other_settings = clean_json(self.other_settings, dict)
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "Provider Configuration (SAML IdP)"
verbose_name_plural = "Provider Configuration (SAML IdPs)"
def get_url_params(self):
""" Get a dict of GET parameters to append to login links for this provider """
return {'idp': self.idp_slug}
def is_active_for_pipeline(self, pipeline):
""" Is this provider being used for the specified pipeline? """
return self.backend_name == pipeline['backend'] and self.idp_slug == pipeline['kwargs']['response']['idp_name']
def match_social_auth(self, social_auth):
""" Is this provider being used for this UserSocialAuth entry? """
prefix = self.idp_slug + ":"
return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix)
def get_config(self):
"""
Return a SAMLIdentityProvider instance for use by SAMLAuthBackend.
Essentially this just returns the values of this object and its
associated 'SAMLProviderData' entry.
"""
if self.other_settings:
conf = json.loads(self.other_settings)
else:
conf = {}
attrs = (
'attr_user_permanent_id', 'attr_full_name', 'attr_first_name',
'attr_last_name', 'attr_username', 'attr_email', 'entity_id')
for field in attrs:
val = getattr(self, field)
if val:
conf[field] = val
# Now get the data fetched automatically from the metadata.xml:
data = SAMLProviderData.current(self.entity_id)
if not data or not data.is_valid():
log.error("No SAMLProviderData found for %s. Run 'manage.py saml pull' to fix or debug.", self.entity_id)
raise AuthNotConfigured(provider_name=self.name)
conf['x509cert'] = data.public_key
conf['url'] = data.sso_url
return SAMLIdentityProvider(self.idp_slug, **conf)
class SAMLConfiguration(ConfigurationModel):
"""
General configuration required for this edX instance to act as a SAML
Service Provider and allow users to authenticate via third party SAML
Identity Providers (IdPs)
"""
private_key = models.TextField(
help_text=(
'To generate a key pair as two files, run '
'"openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". '
'Paste the contents of saml.key here. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
public_key = models.TextField(
help_text=(
'Public key certificate. '
'For increased security, you can avoid storing this in your database by leaving '
'this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting '
'in your instance\'s Django settings (or lms.auth.json).'
),
blank=True,
)
entity_id = models.CharField(max_length=255, default="http://saml.example.com", verbose_name="Entity ID")
org_info_str = models.TextField(
verbose_name="Organization Info",
default='{"en-US": {"url": "http://www.example.com", "displayname": "Example Inc.", "name": "example"}}',
help_text="JSON dictionary of 'url', 'displayname', and 'name' for each language",
)
other_config_str = models.TextField(
default='{\n"SECURITY_CONFIG": {"metadataCacheDuration": 604800, "signMetadata": false}\n}',
help_text=(
"JSON object defining advanced settings that are passed on to python-saml. "
"Valid keys that can be set here include: SECURITY_CONFIG and SP_EXTRA"
),
)
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "SAML Configuration"
verbose_name_plural = verbose_name
def clean(self):
""" Standardize and validate fields """
super(SAMLConfiguration, self).clean()
self.org_info_str = clean_json(self.org_info_str, dict)
self.other_config_str = clean_json(self.other_config_str, dict)
self.private_key = (
self.private_key
.replace("-----BEGIN RSA PRIVATE KEY-----", "")
.replace("-----BEGIN PRIVATE KEY-----", "")
.replace("-----END RSA PRIVATE KEY-----", "")
.replace("-----END PRIVATE KEY-----", "")
.strip()
)
self.public_key = (
self.public_key
.replace("-----BEGIN CERTIFICATE-----", "")
.replace("-----END CERTIFICATE-----", "")
.strip()
)
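# Illustrative note (not part of the original source): clean() stores bare
# base64 key material, so a pasted "-----BEGIN CERTIFICATE-----" block is
# reduced to just the text between the header and footer lines.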
def get_setting(self, name):
""" Get the value of a setting, or raise KeyError """
if name == "ORG_INFO":
return json.loads(self.org_info_str)
if name == "SP_ENTITY_ID":
return self.entity_id
if name == "SP_PUBLIC_CERT":
if self.public_key:
return self.public_key
# To allow instances to avoid storing keys in the DB, the key pair can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PUBLIC_CERT', '')
if name == "SP_PRIVATE_KEY":
if self.private_key:
return self.private_key
# To allow instances to avoid storing keys in the DB, the private key can also be set via Django:
return getattr(settings, 'SOCIAL_AUTH_SAML_SP_PRIVATE_KEY', '')
other_config = json.loads(self.other_config_str)
if name in ("TECHNICAL_CONTACT", "SUPPORT_CONTACT"):
contact = {
"givenName": "{} Support".format(settings.PLATFORM_NAME),
"emailAddress": settings.TECH_SUPPORT_EMAIL
}
contact.update(other_config.get(name, {}))
return contact
return other_config[name] # SECURITY_CONFIG, SP_EXTRA, or similar extra settings
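# Illustrative sketch (not part of the original source): reading settings via
# this model, assuming the ConfigurationModel base class exposes current():
#
#   config = SAMLConfiguration.current()
#   config.get_setting('SP_ENTITY_ID')       # -> 'http://saml.example.com' by default
#   config.get_setting('TECHNICAL_CONTACT')  # platform defaults merged with overrides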
class SAMLProviderData(models.Model):
"""
Data about a SAML IdP that is fetched automatically by 'manage.py saml pull'
This data is only required during the actual authentication process.
"""
cache_timeout = 600
fetched_at = models.DateTimeField(db_index=True, null=False)
expires_at = models.DateTimeField(db_index=True, null=True)
entity_id = models.CharField(max_length=255, db_index=True) # This is the key for lookups in this table
sso_url = models.URLField(verbose_name="SSO URL")
public_key = models.TextField()
class Meta(object): # pylint: disable=missing-docstring
verbose_name = "SAML Provider Data"
verbose_name_plural = verbose_name
ordering = ('-fetched_at', )
def is_valid(self):
""" Is this data valid? """
if self.expires_at and timezone.now() > self.expires_at:
return False
return bool(self.entity_id and self.sso_url and self.public_key)
is_valid.boolean = True
@classmethod
def cache_key_name(cls, entity_id):
""" Return the name of the key to use to cache the current data """
return 'configuration/{}/current/{}'.format(cls.__name__, entity_id)
@classmethod
def current(cls, entity_id):
"""
Return the active data entry, if any, otherwise None
"""
cached = cache.get(cls.cache_key_name(entity_id))
if cached is not None:
return cached
try:
current = cls.objects.filter(entity_id=entity_id).order_by('-fetched_at')[0]
except IndexError:
current = None
cache.set(cls.cache_key_name(entity_id), current, cls.cache_timeout)
return current
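# Illustrative sketch (not part of the original source): because current()
# caches its result (including None) for cache_timeout seconds, lookups cost
# at most one DB query per entity_id every 600 seconds:
#
#   data = SAMLProviderData.current('https://idp.example.com/shibboleth')  # hypothetical entity ID
#   if data and data.is_valid():
#       cert, sso_url = data.public_key, data.sso_url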
| agpl-3.0 | wandec/grr | config/client.py | 2 | 10674 |
#!/usr/bin/env python
"""Configuration parameters for the client."""
from grr.lib import config_lib
from grr.lib import rdfvalue
# General Client options.
config_lib.DEFINE_string("Client.name", "GRR",
"The name of the client. This will be used as a base "
"name to generate many other default parameters such "
"as binary names and service names. Note that on "
"Linux we lowercase the name to confirm with most "
"linux naming conventions.")
config_lib.DEFINE_string("Client.binary_name", "%(Client.name)",
"The name of the client binary.")
config_lib.DEFINE_list("Client.labels", [],
"Labels for this client.")
config_lib.DEFINE_string("Client.company_name", "GRR Project",
"The name of the company which made the client.")
config_lib.DEFINE_string("Client.description", "%(name) %(platform) %(arch)",
"A description of this specific client build.")
config_lib.DEFINE_string("Client.platform", "windows",
"The platform we are running on.")
config_lib.DEFINE_string("Client.arch", "amd64",
"The architecture we are running on.")
config_lib.DEFINE_string("Client.build_time", "Unknown",
"The time the client was built.")
config_lib.DEFINE_string(
name="Client.install_path",
default=r"%(SystemRoot|env)\\System32\\%(name)\\%(version_string)",
help="Where the client binaries are installed.")
config_lib.DEFINE_string(
name="Client.rekall_profile_cache_path",
default=r"%(Client.install_path)\\rekall_profiles",
help="Where GRR stores cached Rekall profiles needed for memory analysis")
config_lib.DEFINE_list("Client.control_urls",
["http://www.example.com/control"],
"List of URLs of the controlling server.")
config_lib.DEFINE_string("Client.plist_path",
"/Library/LaunchDaemons/com.google.code.grrd.plist",
"Location of our launchctl plist.")
config_lib.DEFINE_string("Client.plist_filename", None,
"Filename of launchctl plist.")
config_lib.DEFINE_string("Client.plist_label",
None,
"Identifier label for launchd")
config_lib.DEFINE_string("Client.plist_label_prefix", None,
"Domain for launchd label.")
config_lib.DEFINE_float("Client.poll_min", 0.2,
"Minimum time between polls in seconds.")
config_lib.DEFINE_float("Client.poll_max", 600,
"Maximum time between polls in seconds.")
config_lib.DEFINE_float("Client.error_poll_min", 15,
"Minimum time between polls in seconds if the server "
"reported an error.")
config_lib.DEFINE_float("Client.poll_slew", 1.15,
"Slew of poll time.")
config_lib.DEFINE_integer("Client.connection_error_limit", 60 * 24,
"If the client encounters this many connection "
"errors, it exits and restarts. Retries are one "
"minute apart.")
config_lib.DEFINE_list(
name="Client.proxy_servers",
help="List of valid proxy servers the client should try.",
default=[])
config_lib.DEFINE_integer("Client.max_post_size", 8000000,
"Maximum size of the post.")
config_lib.DEFINE_integer("Client.max_out_queue", 10240000,
"Maximum size of the output queue.")
config_lib.DEFINE_integer("Client.foreman_check_frequency", 1800,
"The minimum number of seconds before checking with "
"the foreman for new work.")
config_lib.DEFINE_float("Client.rss_max", 500,
"Maximum memory footprint in MB.")
config_lib.DEFINE_string(
name="Client.tempfile_prefix",
help="Prefix to use for temp files created by the GRR client.",
default="tmp%(Client.name)")
config_lib.DEFINE_string(
name="Client.tempdir",
help="Default temporary directory to use on the client.",
default="/var/tmp/%(Client.name)/")
config_lib.DEFINE_integer("Client.version_major", 0,
"Major version number of client binary.")
config_lib.DEFINE_integer("Client.version_minor", 0,
"Minor version number of client binary.")
config_lib.DEFINE_integer("Client.version_revision", 0,
"Revision number of client binary.")
config_lib.DEFINE_integer("Client.version_release", 0,
"Release number of client binary.")
config_lib.DEFINE_string("Client.version_string",
"%(version_major).%(version_minor)."
"%(version_revision).%(version_release)",
"Version string of the client.")
config_lib.DEFINE_integer("Client.version_numeric",
"%(version_major)%(version_minor)"
"%(version_revision)%(version_release)",
"Version string of the client as an integer.")
config_lib.DEFINE_list("Client.plugins", [],
help="Additional Plugin paths loaded by the client.")
# Windows client specific options.
config_lib.DEFINE_string("Client.config_hive", r"HKEY_LOCAL_MACHINE",
help="The registry hive where the client "
"configuration will be stored.")
config_lib.DEFINE_string("Client.config_key", r"Software\\GRR",
help="The registry key where client configuration "
"will be stored.")
# Client Cryptographic options.
config_lib.DEFINE_semantic(
rdfvalue.PEMPrivateKey, "Client.private_key",
description="Client private key in pem format. If not provided this "
"will be generated by the enrollment process.",
)
config_lib.DEFINE_semantic(
rdfvalue.RDFX509Cert, "CA.certificate",
description="Trusted CA certificate in X509 pem format",
)
config_lib.DEFINE_semantic(
rdfvalue.PEMPublicKey, "Client.executable_signing_public_key",
description="public key for verifying executable signing.")
config_lib.DEFINE_semantic(
rdfvalue.PEMPrivateKey, "PrivateKeys.executable_signing_private_key",
description="Private keys for signing executables. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_semantic(
rdfvalue.PEMPublicKey, "Client.driver_signing_public_key",
description="public key for verifying driver signing.")
config_lib.DEFINE_semantic(
rdfvalue.PEMPrivateKey, "PrivateKeys.driver_signing_private_key",
description="Private keys for signing drivers. NOTE: This "
"key is usually kept offline and is thus not present in the "
"configuration file.")
config_lib.DEFINE_integer("Client.server_serial_number", 0,
"Minimal serial number we accept for server cert.")
# The following configuration options are defined here but are used in
# the windows nanny code (grr/client/nanny/windows_nanny.h).
config_lib.DEFINE_string("Nanny.child_binary", "GRR.exe",
help="The location to the client binary.")
config_lib.DEFINE_string("Nanny.child_command_line", "%(Nanny.child_binary)",
help="The command line to launch the client binary.")
config_lib.DEFINE_string("Nanny.logfile", "%(Logging.path)/nanny.log",
"The file where we write the nanny transaction log.")
config_lib.DEFINE_string("Nanny.service_name", "GRR Service",
help="The name of the nanny.")
config_lib.DEFINE_string("Nanny.service_description", "GRR Service",
help="The description of the nanny service.")
config_lib.DEFINE_string("Nanny.service_key", r"%(Client.config_key)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string("Nanny.service_key_hive", r"%(Client.config_hive)",
help="The registry key of the nanny service.")
config_lib.DEFINE_string("Nanny.statusfile", "%(Logging.path)/nanny.status",
"The file where we write the nanny status.")
config_lib.DEFINE_string("Nanny.status", "",
"The regkey where we write the nanny status.")
config_lib.DEFINE_string("Nanny.binary",
r"%(Client.install_path)\\%(service_binary_name)",
help="The full location to the nanny binary.")
config_lib.DEFINE_string("Nanny.service_binary_name",
"%(Client.name)service.exe",
help="The executable name of the nanny binary.")
config_lib.DEFINE_integer("Nanny.unresponsive_kill_period", 60,
"The time in seconds after which the nanny kills us.")
config_lib.DEFINE_integer("Network.api", 3,
"The version of the network protocol the client "
"uses.")
config_lib.DEFINE_string("Network.compression", default="ZCOMPRESS",
help="Type of compression (ZCOMPRESS, UNCOMPRESSED)")
# Installer options.
config_lib.DEFINE_string(
name="Installer.logfile",
default="%(Logging.path)/%(Client.name)_installer.txt",
help=("A specific log file which is used for logging the "
"installation process."))
config_lib.DEFINE_list(
"Installer.old_key_map", [
"HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate->Client.private_key",
"HKEY_LOCAL_MACHINE\\Software\\GRR\\server_serial_number"
"->Client.server_serial_number",
],
"""
A mapping of old registry values which will be copied to new values. The old
value location must start with a valid hive name, followed by a key name, and
end with the value name. The source location must be separated from the new
parameter name by a -> symbol.
This setting allows settings from obsolete client installations to be carried
over to newer versions of the client, which may store the same information in
other locations.
For example:
HKEY_LOCAL_MACHINE\\Software\\GRR\\certificate -> Client.private_key
""")
config_lib.DEFINE_string("Installer.old_writeback", "/usr/lib/grr/grrd.conf",
"OS X and linux client installers will check this "
"location for old config data that should be "
"preserved.")
| apache-2.0 | obsrvbl/ona | src/scripts/ona_service/pna_pusher.py | 1 | 1769 |
# Copyright 2015 Observable Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python builtins
import logging
from os import getenv
# local
from ona_service.pusher import Pusher
# determine compression to use for transfer
try:
import bz2 # noqa
TAR_MODE = 'w:bz2'
except ImportError:
import gzip # noqa
TAR_MODE = 'w:gz'
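# Illustrative sketch (not part of the original source): TAR_MODE is intended
# for tarfile.open, which accepts the 'w:bz2' and 'w:gz' write modes, e.g.
#
#   import tarfile
#   with tarfile.open('pna-logs.tar', TAR_MODE) as tf:  # hypothetical archive name
#       tf.add('logs/pna-201501010000')                 # hypothetical log file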
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
ENV_PNA_LOGDIR = 'PNA_LOGDIR'
DEFAULT_PNA_LOGDIR = './logs'
POLL_SECONDS = 30
class PnaPusher(Pusher):
"""
The PNA software writes logs every 10 seconds. These are aggregated on
ten-minute intervals (HHMs), compressed, and written to the Observable
cloud for processing.
"""
def __init__(self, *args, **kwargs):
kwargs.update({
'data_type': 'pna',
'file_fmt': 'pna-%Y%m%d%H%M',
'prefix_len': 16,
'input_dir': getenv(ENV_PNA_LOGDIR, DEFAULT_PNA_LOGDIR),
'poll_seconds': POLL_SECONDS,
})
# archives will be compressed before transmission
self.tar_mode = TAR_MODE
super().__init__(*args, **kwargs)
if __name__ == '__main__':
pusher = PnaPusher()
pusher.run()
| apache-2.0 | ycaihua/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/util.py | 343 | 24172 |
import sys
import shutil
import os
import stat
import re
import posixpath
import zipfile
import tarfile
import subprocess
import textwrap
from pip.exceptions import InstallationError, BadCommand, PipError
from pip.backwardcompat import(WindowsError, string_types, raw_input,
console_to_str, user_site, PermissionError)
from pip.locations import site_packages, running_under_virtualenv, virtualenv_no_global
from pip.log import logger
from pip._vendor import pkg_resources
from pip._vendor.distlib import version
__all__ = ['rmtree', 'display_path', 'backup_dir',
'find_command', 'ask', 'Inf',
'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'create_download_cache_folder',
'cache_download', 'unpack_file', 'call_subprocess']
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
exctype, value = exc_info[:2]
if not ((exctype is WindowsError and value.args[0] == 5) or #others
(exctype is OSError and value.args[0] == 13) or #python2.4
(exctype is PermissionError and value.args[3] == 5) #python3.3
):
raise
# file type should currently be read only
if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
raise
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
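# Example (illustrative, not part of the original module):
#   >>> backup_dir('/tmp/build')  # assuming '/tmp/build.bak' does not exist
#   '/tmp/build.bak'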
def find_command(cmd, paths=None, pathext=None):
"""Searches the PATH for the given command and returns its path"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, string_types):
paths = [paths]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep) if len(ext)]
# don't use extensions if the command ends with one of them
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
raise BadCommand('Cannot find command %r' % cmd)
def get_pathext(default_pathext=None):
"""Returns the path extensions from environment or a default"""
if default_pathext is None:
default_pathext = os.pathsep.join(['.COM', '.EXE', '.BAT', '.CMD'])
pathext = os.environ.get('PATHEXT', default_pathext)
return pathext
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message)
response = raw_input(message)
response = response.strip().lower()
if response not in options:
print('Your response (%r) was not one of the expected responses: %s' % (
response, ', '.join(options)))
else:
return response
class _Inf(object):
"""I am bigger than everything!"""
def __eq__(self, other):
if self is other:
return True
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __repr__(self):
return 'Inf'
Inf = _Inf() #this object is not currently used as a sortable in our code
del _Inf
_normalize_re = re.compile(r'[^a-z]', re.I)
def normalize_name(name):
return _normalize_re.sub('-', name.lower())
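# Example (illustrative, not part of the original module): every character
# outside a-z is replaced with '-':
#   >>> normalize_name('Twisted_Web')
#   'twisted-web'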
def format_size(bytes):
if bytes > 1000*1000:
return '%.1fMB' % (bytes/1000.0/1000)
elif bytes > 10*1000:
return '%ikB' % (bytes/1000)
elif bytes > 1000:
return '%.1fkB' % (bytes/1000.0)
else:
return '%ibytes' % bytes
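# Examples (illustrative, not part of the original module):
#   >>> format_size(1234567)
#   '1.2MB'
#   >>> format_size(52000)
#   '52kB'
#   >>> format_size(999)
#   '999bytes'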
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""Returns true if the page appears to be the index page of an svn repository"""
return (re.search(r'<title>[^<]*Revision \d+:', html)
and re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
fp = open(filename, 'rb')
try:
return fp.read().decode('utf-8')
finally:
fp.close()
def split_leading_dir(path):
path = str(path)
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
or '\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
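# Examples (illustrative, not part of the original module):
#   >>> has_leading_dir(['pkg/setup.py', 'pkg/README.txt'])
#   True
#   >>> has_leading_dir(['pkg/setup.py', 'README.txt'])
#   False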
def make_path_relative(path, rel_to):
"""
Make a filename relative, given the filename's path and the directory it
should be relative to.
>>> make_path_relative('/usr/share/something/a-file.pth',
...                    '/usr/share/another-place/src/Directory')
'../../../something/a-file.pth'
>>> make_path_relative('/usr/share/something/a-file.pth',
...                    '/home/user/src/Directory')
'../../../usr/share/something/a-file.pth'
>>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
'a-file.pth'
"""
path_filename = os.path.basename(path)
path = os.path.dirname(path)
path = os.path.normpath(os.path.abspath(path))
rel_to = os.path.normpath(os.path.abspath(rel_to))
path_parts = path.strip(os.path.sep).split(os.path.sep)
rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
path_parts.pop(0)
rel_to_parts.pop(0)
full_parts = ['..']*len(rel_to_parts) + path_parts + [path_filename]
if full_parts == ['']:
return '.' + os.path.sep
return os.path.sep.join(full_parts)
def normalize_path(path):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
return os.path.normcase(os.path.realpath(os.path.expanduser(path)))
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
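# Example (illustrative, not part of the original module):
#   >>> splitext('pip-1.5.4.tar.gz')
#   ('pip-1.5.4', '.tar.gz')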
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
if user_site:
return normalize_path(dist_location(dist)).startswith(normalize_path(user_site))
else:
return False
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in distutils.sysconfig.get_python_lib().
"""
return normalize_path(dist_location(dist)).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
    # TODO: factor determining editableness out of FrozenRequirement
from pip import FrozenRequirement
req = FrozenRequirement.from_dist(dist, [])
return req.editable
def get_installed_distributions(local_only=True,
skip=('setuptools', 'pip', 'python', 'distribute'),
include_editables=True,
editables_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to ('setuptools', 'pip', 'python', 'distribute').
[FIXME also skip virtualenv?]
If ``include_editables`` is False, don't report editables.
If ``editables_only`` is True, only report editables.
"""
if local_only:
local_test = dist_is_local
else:
local_test = lambda d: True
if include_editables:
editable_test = lambda d: True
else:
editable_test = lambda d: not dist_is_editable(d)
if editables_only:
editables_only_test = lambda d: dist_is_editable(d)
else:
editables_only_test = lambda d: True
return [d for d in pkg_resources.working_set
if local_test(d)
and d.key not in skip
and editable_test(d)
and editables_only_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE (don't look in global location)
For #1 and #3, there could be odd cases where there's an egg-link in 2 locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
'1234'))
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
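# Example (illustrative, not part of the original module): under a typical
# umask of 022 this returns 0o022 without permanently changing the umask:
#   >>> current_umask() == 0o022  # assuming umask 022
#   True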
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not os.path.exists(dir):
os.makedirs(dir)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
if not os.path.exists(fn):
os.makedirs(fn)
else:
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode is set, is this a regular file with any execute bit (user/group/world)?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world (chmod +x)
# no-op on windows per python docs
os.chmod(fn, (0o777-current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
if not os.path.exists(location):
os.makedirs(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
mode = 'r:bz2'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warn('Cannot determine compression type for file %s' % filename)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
if not os.path.exists(path):
os.makedirs(path)
elif member.issym():
try:
tar._extract_member(member, path)
except:
e = sys.exc_info()[1]
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warn(
'In the tar file %s the member %s is invalid: %s'
% (filename, member.name, e))
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError):
e = sys.exc_info()[1]
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warn(
'In the tar file %s the member %s is invalid: %s'
% (filename, member.name, e))
continue
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
destfp = open(path, 'wb')
try:
shutil.copyfileobj(fp, destfp)
finally:
destfp.close()
fp.close()
# does the member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777-current_umask() | 0o111))
finally:
tar.close()
def create_download_cache_folder(folder):
logger.indent -= 2
logger.notify('Creating supposed download cache at %s' % folder)
logger.indent += 2
os.makedirs(folder)
def cache_download(target_file, temp_location, content_type):
logger.notify('Storing download in cache at %s' % display_path(target_file))
shutil.copyfile(temp_location, target_file)
fp = open(target_file+'.content-type', 'w')
fp.write(content_type)
fp.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip'
or filename.endswith('.zip')
or filename.endswith('.pybundle')
or filename.endswith('.whl')
or zipfile.is_zipfile(filename)):
unzip_file(filename, location, flatten=not filename.endswith(('.pybundle', '.whl')))
elif (content_type == 'application/x-gzip'
or tarfile.is_tarfile(filename)
or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html')
and is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
## FIXME: handle?
## FIXME: magic signatures?
logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
% (filename, location, content_type))
raise InstallationError('Cannot determine archive format of %s' % location)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True,
command_level=logger.DEBUG, command_desc=None,
extra_environ=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.log(command_level, "Running command %s" % command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, command_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
while 1:
line = console_to_str(stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
returned_stdout, returned_stderr = proc.communicate()
all_output = [returned_stdout or '']
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % command_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise InstallationError(
"Command %s failed with error code %s in %s"
% (command_desc, proc.returncode, cwd))
else:
logger.warn(
"Command %s had error code %s in %s"
% (command_desc, proc.returncode, cwd))
if stdout is not None:
return ''.join(all_output)
def is_prerelease(vers):
"""
Attempt to determine if this is a pre-release using PEP386/PEP426 rules.
Will return True if it is a pre-release and False if not. Versions are
assumed to be a pre-release if they cannot be parsed.
"""
normalized = version._suggest_normalized_version(vers)
if normalized is None:
# Cannot normalize, assume it is a pre-release
return True
parsed = version._normalized_key(normalized)
return any([any([y in set(["a", "b", "c", "rc", "dev"]) for y in x]) for x in parsed])
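# Examples (illustrative, not part of the original module; expected behaviour
# under PEP 386-style normalization):
#   >>> is_prerelease('1.0b1')
#   True
#   >>> is_prerelease('1.0')
#   False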
| lgpl-3.0 |