repo_name (string, lengths 5-100) | path (string, lengths 4-375) | copies (string, 991 classes) | size (string, lengths 4-7) | content (string, lengths 666-1M) | license (string, 15 classes)
---|---|---|---|---|---|
sk1p/django-codemirror2
|
examples/examples/settings.py
|
1
|
3206
|
"""
Django settings for examples project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wa(b*8q*p0%!lwukj&na3lo3!57nis4dropxk%^r7kqx)lyt_7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp',
'codemirror2',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'examples.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'examples.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
mit
|
marc-sensenich/ansible
|
lib/ansible/plugins/filter/urlsplit.py
|
146
|
1136
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
from ansible.errors import AnsibleFilterError
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils import helpers
def split_url(value, query='', alias='urlsplit'):
results = helpers.object_to_dict(urlsplit(value), exclude=['count', 'index', 'geturl', 'encode'])
# If a query is supplied, make sure it's valid then return the results.
# If no option is supplied, return the entire dictionary.
if query:
if query not in results:
raise AnsibleFilterError(alias + ': unknown URL component: %s' % query)
return results[query]
else:
return results
# ---- Ansible filters ----
class FilterModule(object):
''' URI filter '''
def filters(self):
return {
'urlsplit': split_url
}
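# Usage sketch (added note, not part of the original module): in a template or
# playbook the filter can be applied to a URL string, e.g.
#
#   {{ "http://www.acme.com:9000/dir/index.html?query=term#fragment" | urlsplit('hostname') }}
#   # -> "www.acme.com"
#
# and calling it without a component name returns the whole dictionary of parts
# (scheme, netloc, hostname, port, path, query, fragment, username, password).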
|
gpl-3.0
|
sclc/NAEF
|
exp_scripts/worker_exp_160507.py
|
1
|
5393
|
"""
Experiment Diary 2016-05-07
"""
import sys
from scipy import io
import numpy as np
from scipy.sparse.linalg import *
sys.path.append("../src/")
from worker import Worker
from native_conjugate_gradient import NativeConjugateGradient
from native_conjugate_gradient import NativeBlockConjugateGradient
from gerschgorin_circle_theorem import GerschgorinCircleTheoremEigenvalueEstimator
from chebyshev_polynomial import ChebyshevPolynomial
from chebyshev_basis_cacg import CBCG
from chebyshev_basis_cacg import BCBCG
class WorkerIterativeLinearSystemSolverCG_Exp_160507_A(Worker):
""" Description: Experiment A
Numerical Method: Naive Conjugate Gradient
tol:
max_iteration:
matrix:
Reference:
1.
"""
def __init__(self, mat_path):
""" """
#print ("WorkerIterativeLinearSystemSolver works good")
Worker.__init__(self)
self._hist_list = []
if mat_path == "":
""" Need to generatre matrix """
print("calling self._matrix_generation")
#self._mat = self._matrix_generation()
else:
self._mat_coo = io.mmread(mat_path)
self._mat = self._mat_coo.tocsr()
self._mat_info = io.mminfo(mat_path)
print("Done reading matrix {}, Row:{}, Col:{}".format( mat_path, self._mat.shape[0], self._mat.shape[1]))
print("mminfo:{}".format(self._mat_info))
#print("matrix:{}".format(type(self._mat)))
#print("matrix:{}".format(dir(self._mat)))
if self._mat.getformat() == "csr":
print("Yeah, it is CSR")
def _matrix_generator(self):
""" generation of matrix """
print("_matrix_generator")
def _setup_testbed(self, block_size):
""" this can considered as a basic experiment input descripting """
self._B = np.random.random( ( self._mat.shape[0],block_size) )
np.savetxt("/home/scl/tmp/rhs.csv",self._B, delimiter=",")
#self._B = np.ones( ( self._mat.shape[0],6) )
self._X = np.ones ( (self._mat.shape[1],block_size) )
#self._X = np.zeros ( (self._mat.shape[1],1) )
def _setup_numerical_algorithm(self,tol, maxiter, step_val):
""" After a linear solver or other numerical methods loaded
we need to setup the basic prarm for the algorithm
"""
self._tol = tol
self._maxiter = maxiter
self._step_val = step_val
def conduct_experiments(self, block_size, tol, maxiter, step_val):
""" function to condution the experiment """
print("to conduct the experient")
self._setup_testbed(block_size)
self._setup_numerical_algorithm(tol,maxiter,step_val)
#self._debug_gerschgorin()
#self._debug_chebyshev_polynomial_basis_generator()
#print ("before:{}".format(np.inner(self._X[:,0], self._X[:,0])))
self._debug_cbcg()
#self._debug_bcg()
#self._debug_bcbcg()
print("Experiments done")
def _debug_gerschgorin (self):
eigen_estimator = GerschgorinCircleTheoremEigenvalueEstimator()
max_eigen, min_eigen = eigen_estimator.csr_mat_extreme_eigenvalue_estimation(self._mat)
print("max:{}, min:{}".format(max_eigen,min_eigen))
def _debug_chebyshev_polynomial_basis_generator(self):
step_val=5
cheby_generator = ChebyshevPolynomial()
eigen_estimator = GerschgorinCircleTheoremEigenvalueEstimator()
max_eigen, min_eigen = eigen_estimator.csr_mat_extreme_eigenvalue_estimation(self._mat)
print("max:{}, min:{}\n".format(max_eigen,min_eigen))
res = cheby_generator.basis_generation_with_eigenvalue_shifting_and_scaling_single_vec(\
self._mat, self._B, step_val, max_eigen, min_eigen)
print(res)
def _debug_cbcg(self):
cbcg_solver_obj = CBCG()
self._final_x, self._final_r, self._residual_hist = \
cbcg_solver_obj.cbcg_solver(self._mat, self._B, self._X, self._step_val, self._tol, self._maxiter)
print(self._residual_hist)
def _debug_bcg(self):
bcg_solver_obj = NativeBlockConjugateGradient(self._mat, self._X, self._B, self._tol, self._maxiter)
self._final_X, self._final_R, self._residual_hist = bcg_solver_obj.bcg_variant_one_run(0)
print(self._residual_hist)
#bcg_solver_obj.bcg_variant_one_run(0)
def _debug_bcbcg(self):
bcbcg_solver_obj = BCBCG()
self._final_X, self._final_R, self._residual_hist = \
bcbcg_solver_obj.bcbcg_solver(self._mat, self._B, self._X, self._step_val, self._tol, self._maxiter,1)
print(self._residual_hist)
def main ():
# main function for today's experiments
#mat_path = "/home/scl/MStore/nasa2146/nasa2146.mtx"
mat_path = "/home/scl/MStore/crystm01/crystm01.mtx"
#mat_path = "/home/scl/MStore/ex13/ex13.mtx"
#mat_path = "/home/scl/MStore/LFAT5/LFAT5.mtx"
block_size = 1
tol = 1e-10
maxiter = 3
step_val =5
linear_system_solver_worker_test = WorkerIterativeLinearSystemSolverCG_Exp_160507_A(mat_path)
linear_system_solver_worker_test.conduct_experiments(block_size,tol,maxiter, step_val)
#linear_system_solver_worker_test.debug_NativeConjugateGradient()
if __name__ == "__main__":
""" call main funtion for testing """
main()
|
gpl-3.0
|
heuermh/ga4gh-schemas
|
tools/sphinx/protobuf-uml.py
|
5
|
11990
|
#!/usr/bin/env python
"""
Authors: Malisa Smith, Adam Novak, David Steinberg
Plugin for generating a UML diagram of the GA4GH schema.
The resulting png file is then used in RST files to create Sphinx documentation.
Usage:
protoc --plugin=protoc-gen-custom=<script path>/protobuf-uml.py --custom_out=<uml dir> <proto file(s)>
"""
import sys, os
from google.protobuf.compiler import plugin_pb2 as plugin
from graphviz import Source
# Check the message to see if it is a trivial map.
def is_trivial_map(nested_type):
# Define a trivial map to be a message with a nested_type.name
# that ends in "Entry" with two fields, "key" and "value". The
# "value" field has a type that is not 11 (and a list) or 14.
if nested_type.name.endswith("Entry") and len(nested_type.field) == 2 and nested_type.field[0].name == "key" and nested_type.field[1].name == "value" and not ((nested_type.field[1].type == 11 and not nested_type.field[1].type_name == ".google.protobuf.ListValue") or nested_type.field[1] == 14):
return True
else:
return False
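# Illustrative example (added; the .proto snippet below is assumed, not taken
# from this repository): a declaration such as
#
#   map<string, string> attributes = 1;
#
# is compiled by protoc into a synthetic nested message named "AttributesEntry"
# with exactly two fields, "key" and "value", which is the shape the check
# above classifies as a trivial map.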
# Parse a message. Pass in all the dictionaries to be updated, as well
# as the relevant message. Parse the name, field, nested_type, and
# enum_type fields in DescriptorProto:
# https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto#L92
def parse_message(cluster, fields, containments, nests, id_targets, id_references, clusters, message, message_index=None):
# Track all the fields in the message
fields[message.name] = []
for field_index in range(0, len(message.field)):
field = message.field[field_index]
fields[message.name].append((field.name, field.type))
# Deal with containments, id_targets, and id_references, if
# applicable. Containments are signified by a field.type
# of 11 (for TYPE_MESSAGE) or 14 (for TYPE_ENUM). The type of
# containment can be determined by looking at field.type_name.
# Maps will also come up as type 11 and will have a
# field.type_name of something like
# .ga4gh.Feature.AttributesEntry, where the actual field name
# is attributes
if field.type == 11 or field.type == 14:
# We are likely adding containments of trivial maps,
# e.g. ('VariantCallEffect', 'InfoEntry', 'info'). But the
# edge is only drawn if the map/message itself is not a
# trivial map. When drawing containment edges, the program
# checks if the field type_name is a key in the fields
# dictionary.
containments.add((message.name, field.type_name.split(".")[-1], field.name))
# id_targets are simply fields where field.name is "id"
if field.name.lower() == "id":
id_targets[message.name.lower()] = (message.name, field.name.lower().split(".")[-1])
# id_references are fields which end in id or ids
elif field.name.lower().endswith("id") or field.name.lower().endswith("ids"):
if field.name.lower().endswith("id"):
destination = field.name.lower()[0:-2]
elif field.name.lower().endswith("ids"):
destination = field.name.lower()[0:-3]
destination = destination.replace("_", "")
id_references.add((message.name, destination, field.name))
for nested_type in message.nested_type:
# Nested messages can be defined without actually being used in
# a field of the outer message. So, a nested_type is not
# necessarily used in a field.
# Note: according to
# https://developers.google.com/protocol-buffers/docs/proto#backwards-compatibility
# maps are sent as messages (not map-types) "on the wire". We
# don't want to draw nodes for nested types that are trivial
# maps of string to string. So, check if we want to process
# the nested_type further:
if not is_trivial_map(nested_type):
# The nested_type is nested within the message.
# Nested_type is itself a message, so recursively call
# this function.
parse_message(cluster, fields, containments, nests, id_targets, id_references, clusters, nested_type)
for enum_type in message.enum_type: # A nested Enum
# Define it as a top-level type. So it has a fields entry.
fields[enum_type.name] = []
for field in enum_type.value:
fields[enum_type.name].append((field.name, 9))
# Finally, add it to the cluster
clusters[cluster.name].append(enum_type.name)
# Add the name of the message as a type in the current cluster
clusters[cluster.name].append(message.name)
def parse_cluster(cluster, fields, containments, nests, id_targets, id_references, clusters):
cluster_name = cluster.name
if cluster_name.endswith("google/protobuf/struct.proto") or cluster_name.endswith("google/api/http.proto") or cluster_name.endswith("google/protobuf/descriptor.proto"):
pass
else:
clusters[cluster_name] = []
# process all the enum-types in the cluster
for enum in cluster.enum_type:
# Track all the enum "fields"
fields[enum.name] = []
for field in enum.value:
# An Enum field is a string. field types in
# DescriptorProto uses 9 for TYPE_STRING
fields[enum.name].append((field.name, 9))
# Record the name of the enum as a type in the current cluster
clusters[cluster.name].append(enum.name)
# Track all the message-types in the cluster
for message_index in range(0, len(cluster.message_type)):
message = cluster.message_type[message_index]
# Recursively parse each message
parse_message(cluster, fields, containments, nests, id_targets, id_references, clusters, message, message_index)
# Note: the message will add itself to the cluster
def write_graph(fields, containments, nests, matched_references, clusters):
# Start a digraph
graph = "digraph UML {\n"
# Set the image's size, in inches
graph += "size= \"33,33\";\n"
# Add a title
graph += "labelloc=\"t\";\n"
graph += "label=<<FONT POINT-SIZE=\"45\">GA4GH Schema Diagram</FONT>>;\n"
# Define node properties: shaped like UML items.
graph += "node [\n"
graph += "\tshape=plaintext\n"
graph += "]\n\n"
# Draw each node/type/record as a table
for type_name, field_list in fields.items():
graph += "{} [label=<\n".format(type_name)
graph += "<TABLE BORDER='0' CELLBORDER='1' CELLSPACING='0' CELLPADDING='4' bgcolor='#002060' color='#002060'>\n"
graph += "\t<TR>\n"
graph += "\t\t<TD COLSPAN='2' bgcolor='#79A6FF' border='3'><FONT POINT-SIZE='20' color='white'>{}</FONT>".format(type_name)
graph += "</TD>\n"
graph += "\t</TR>\n"
# Now draw the rows of fields for the type. A field_list of
# [a, b, c, d, e, f, g] will have [a, e] in row 1, [b, f] in
# row 2, [c, g] in row 3, and just [d] in row 4
num_fields = len(field_list)
for i in range(0, num_fields//2 + num_fields%2):
# Draw one row.
graph += "\t<TR>\n"
# Port number and displayed text will be the i'th field's
# name
graph += "\t\t<TD align='left' port='{}'><FONT color='white'>- {}</FONT></TD>\n".format(field_list[i][0], field_list[i][0])
if (num_fields%2) == 1 and (i == num_fields//2 + num_fields%2 - 1):
# Don't draw the second cell in the row if you have an
# odd number of fields and it is the last row
pass
else:
graph += "\t\t<TD align='left' port='{}'><FONT color='white'>- {}</FONT></TD>\n".format(field_list[num_fields//2 + num_fields%2 + i][0], field_list[num_fields//2 + num_fields%2 + i][0])
graph += "\t</TR>\n"
# Finish the table
graph += "</TABLE>>];\n\n"
# Now define the clusters/subgraphs
for cluster_name, cluster_types in clusters.items():
graph += "subgraph cluster_{} {{\n".format(cluster_name.replace(".", "_").replace("/", "_"))
graph += "\tstyle=\"rounded, filled\";\n"
graph += "\tcolor=lightgrey;\n"
graph += "\tnode [style=filled,color=white];\n"
graph += "\tlabel = \"{}\";\n".format(cluster_name.replace(".", "_"))
# After all the cluster formatting, define the cluster types
for cluster_type in cluster_types:
# cluster_type should match up with a type_name from fields
graph += "\t{};\n".format(cluster_type)
graph += "}\n\n"
# Define edge properties for containments
graph += "edge [\n"
graph += "\tdir=both\n"
graph += "\tarrowtail=odiamond\n"
graph += "\tarrowhead=none\n"
graph += "\tcolor=\"#C55A11\"\n"
graph += "\tpenwidth=2\n"
graph += "]\n\n"
for container, containee, container_field_name in containments:
# Now do the containment edges
# Only write the edge if the containee is a top-level field in fields.
if containee in fields:
graph += "{}:{}:w -> {}\n".format(container,
container_field_name, containee)
# Define edge properties for references
graph += "\nedge [\n"
graph += "\tdir=both\n"
graph += "\tarrowtail=none\n"
graph += "\tarrowhead=vee\n"
graph += "\tstyle=dashed\n"
graph += "\tcolor=\"darkgreen\"\n"
graph += "\tpenwidth=2\n"
graph += "]\n\n"
for referencer, referencer_field, referencee in matched_references:
# Now do the reference edges
graph += "{}:{}:w -> {}:id:w\n".format(referencer, referencer_field,
referencee)
# Close the digraph off.
graph += "}\n"
graph = graph.replace("\n", " ").replace("\t", " ")
src = Source(graph, format='svg')
src.render('_build/generated_images/schema_uml')
def parse_descriptor(request):
# Holds the fields for each type, as lists of tuples of (name,
# type), indexed by type. All types are fully qualified.
fields = {}
# Holds edge tuples for containment from container to contained.
containments = set()
# Holds edge tuples for nested type edges, from parent type to
# nested type.
nests = set()
# Holds a dictionary from lower-case short name to fully-qualified
# name for everything with an "id" field. E.g. if Variant has an
# id, then key is "variant" and value is "Variant"
id_targets = {}
# Holds a set of tuples of ID references, (fully qualified name of
# referencer, lower-case target name)
id_references = set()
# Holds the type names from each original .proto file, in order
# to draw one cluster of types for each file.
# Key: cluster/file name; Value: list of type names in that file
clusters = {}
for cluster in request.proto_file:
parse_cluster(cluster, fields, containments, nests, id_targets, id_references, clusters)
# Now match the id references to targets. Tuples of strings,
# i.e. (referencer, referencer_field, referencee)
matched_references = set()
for id_reference in id_references:
if id_reference[1] in id_targets:
matched_references.add((id_reference[0], id_reference[2], id_targets[id_reference[1]][0]))
return (fields, containments, nests, matched_references, clusters)
def generate_code(request):
(fields, containments, nests, matched_references, clusters) = parse_descriptor(request)
write_graph(fields, containments, nests, matched_references, clusters)
if __name__ == '__main__':
# Read request message from stdin
data = sys.stdin.read()
# Parse request
request = plugin.CodeGeneratorRequest()
request.ParseFromString(data)
# Generate code
generate_code(request)
|
apache-2.0
|
cowlicks/numpy
|
numpy/f2py/tests/test_return_real.py
|
69
|
5323
|
from __future__ import division, absolute_import, print_function
from numpy.testing import *
from numpy import array
from numpy.compat import long
import math
import util
class TestReturnReal(util.F2PyTest):
def check_function(self, t):
if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
err = 1e-5
else:
err = 0.0
assert_( abs(t(234)-234.0)<=err)
assert_( abs(t(234.6)-234.6)<=err)
assert_( abs(t(long(234))-234.0)<=err)
assert_( abs(t('234')-234)<=err)
assert_( abs(t('234.6')-234.6)<=err)
assert_( abs(t(-234)+234)<=err)
assert_( abs(t([234])-234)<=err)
assert_( abs(t((234,))-234.)<=err)
assert_( abs(t(array(234))-234.)<=err)
assert_( abs(t(array([234]))-234.)<=err)
assert_( abs(t(array([[234]]))-234.)<=err)
assert_( abs(t(array([234], 'b'))+22)<=err)
assert_( abs(t(array([234], 'h'))-234.)<=err)
assert_( abs(t(array([234], 'i'))-234.)<=err)
assert_( abs(t(array([234], 'l'))-234.)<=err)
assert_( abs(t(array([234], 'B'))-234.)<=err)
assert_( abs(t(array([234], 'f'))-234.)<=err)
assert_( abs(t(array([234], 'd'))-234.)<=err)
if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
assert_( t(1e200)==t(1e300)) # inf
#assert_raises(ValueError, t, array([234], 'S1'))
assert_raises(ValueError, t, 'abc')
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
assert_raises(Exception, t, t)
assert_raises(Exception, t, {})
try:
r = t(10**400)
assert_( repr(r) in ['inf', 'Infinity'], repr(r))
except OverflowError:
pass
class TestCReturnReal(TestReturnReal):
suffix = ".pyf"
module_name = "c_ext_return_real"
code = """
python module c_ext_return_real
usercode \'\'\'
float t4(float value) { return value; }
void s4(float *t4, float value) { *t4 = value; }
double t8(double value) { return value; }
void s8(double *t8, double value) { *t8 = value; }
\'\'\'
interface
function t4(value)
real*4 intent(c) :: t4,value
end
function t8(value)
real*8 intent(c) :: t8,value
end
subroutine s4(t4,value)
intent(c) s4
real*4 intent(out) :: t4
real*4 intent(c) :: value
end
subroutine s8(t8,value)
intent(c) s8
real*8 intent(out) :: t8
real*8 intent(c) :: value
end
end interface
end python module c_ext_return_real
"""
@dec.slow
def test_all(self):
for name in "t4,t8,s4,s8".split(","):
self.check_function(getattr(self.module, name))
class TestF77ReturnReal(TestReturnReal):
code = """
function t0(value)
real value
real t0
t0 = value
end
function t4(value)
real*4 value
real*4 t4
t4 = value
end
function t8(value)
real*8 value
real*8 t8
t8 = value
end
function td(value)
double precision value
double precision td
td = value
end
subroutine s0(t0,value)
real value
real t0
cf2py intent(out) t0
t0 = value
end
subroutine s4(t4,value)
real*4 value
real*4 t4
cf2py intent(out) t4
t4 = value
end
subroutine s8(t8,value)
real*8 value
real*8 t8
cf2py intent(out) t8
t8 = value
end
subroutine sd(td,value)
double precision value
double precision td
cf2py intent(out) td
td = value
end
"""
@dec.slow
def test_all(self):
for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","):
self.check_function(getattr(self.module, name))
class TestF90ReturnReal(TestReturnReal):
suffix = ".f90"
code = """
module f90_return_real
contains
function t0(value)
real :: value
real :: t0
t0 = value
end function t0
function t4(value)
real(kind=4) :: value
real(kind=4) :: t4
t4 = value
end function t4
function t8(value)
real(kind=8) :: value
real(kind=8) :: t8
t8 = value
end function t8
function td(value)
double precision :: value
double precision :: td
td = value
end function td
subroutine s0(t0,value)
real :: value
real :: t0
!f2py intent(out) t0
t0 = value
end subroutine s0
subroutine s4(t4,value)
real(kind=4) :: value
real(kind=4) :: t4
!f2py intent(out) t4
t4 = value
end subroutine s4
subroutine s8(t8,value)
real(kind=8) :: value
real(kind=8) :: t8
!f2py intent(out) t8
t8 = value
end subroutine s8
subroutine sd(td,value)
double precision :: value
double precision :: td
!f2py intent(out) td
td = value
end subroutine sd
end module f90_return_real
"""
@dec.slow
def test_all(self):
for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","):
self.check_function(getattr(self.module.f90_return_real, name))
if __name__ == "__main__":
import nose
nose.runmodule()
|
bsd-3-clause
|
persandstrom/home-assistant
|
homeassistant/components/media_player/philips_js.py
|
4
|
6851
|
"""
Media Player component to integrate TVs exposing the Joint Space API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.philips_js/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_STEP,
MediaPlayerDevice)
from homeassistant.const import (
CONF_API_VERSION, CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from homeassistant.util import Throttle
REQUIREMENTS = ['ha-philipsjs==0.0.5']
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
SUPPORT_PHILIPS_JS = SUPPORT_TURN_OFF | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_SELECT_SOURCE
SUPPORT_PHILIPS_JS_TV = SUPPORT_PHILIPS_JS | SUPPORT_NEXT_TRACK | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_PLAY
CONF_ON_ACTION = 'turn_on_action'
DEFAULT_DEVICE = 'default'
DEFAULT_HOST = '127.0.0.1'
DEFAULT_NAME = "Philips TV"
DEFAULT_API_VERSION = '1'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_API_VERSION, default=DEFAULT_API_VERSION): cv.string,
vol.Optional(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
})
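# Rough configuration sketch matching the schema above (added note; the host,
# name and action values are made up):
#
#   media_player:
#     - platform: philips_js
#       host: 192.168.1.100
#       name: Living Room TV
#       api_version: 1
#       turn_on_action:
#         service: wake_on_lan.send_magic_packet
#         data:
#           mac: "aa:bb:cc:dd:ee:ff"
#
# turn_on_action accepts any script sequence and is wired to SUPPORT_TURN_ON below.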
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Philips TV platform."""
import haphilipsjs
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
api_version = config.get(CONF_API_VERSION)
turn_on_action = config.get(CONF_ON_ACTION)
tvapi = haphilipsjs.PhilipsTV(host, api_version)
on_script = Script(hass, turn_on_action) if turn_on_action else None
add_entities([PhilipsTV(tvapi, name, on_script)])
class PhilipsTV(MediaPlayerDevice):
"""Representation of a Philips TV exposing the JointSpace API."""
def __init__(self, tv, name, on_script):
"""Initialize the Philips TV."""
self._tv = tv
self._name = name
self._state = STATE_UNKNOWN
self._min_volume = None
self._max_volume = None
self._volume = None
self._muted = False
self._program_name = None
self._channel_name = None
self._source = None
self._source_list = []
self._connfail = 0
self._source_mapping = {}
self._watching_tv = None
self._channel_name = None
self._on_script = on_script
@property
def name(self):
"""Return the device name."""
return self._name
@property
def should_poll(self):
"""Device should be polled."""
return True
@property
def supported_features(self):
"""Flag media player features that are supported."""
is_supporting_turn_on = SUPPORT_TURN_ON if self._on_script else 0
if self._watching_tv:
return SUPPORT_PHILIPS_JS_TV | is_supporting_turn_on
return SUPPORT_PHILIPS_JS | is_supporting_turn_on
@property
def state(self):
"""Get the device state. An exception means OFF state."""
return self._state
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
def select_source(self, source):
"""Set the input source."""
if source in self._source_mapping:
self._tv.setSource(self._source_mapping.get(source))
self._source = source
if not self._tv.on:
self._state = STATE_OFF
self._watching_tv = bool(self._tv.source_id == 'tv')
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
def turn_on(self):
"""Turn on the device."""
if self._on_script:
self._on_script.run()
def turn_off(self):
"""Turn off the device."""
self._tv.sendKey('Standby')
if not self._tv.on:
self._state = STATE_OFF
def volume_up(self):
"""Send volume up command."""
self._tv.sendKey('VolumeUp')
if not self._tv.on:
self._state = STATE_OFF
def volume_down(self):
"""Send volume down command."""
self._tv.sendKey('VolumeDown')
if not self._tv.on:
self._state = STATE_OFF
def mute_volume(self, mute):
"""Send mute command."""
self._tv.sendKey('Mute')
if not self._tv.on:
self._state = STATE_OFF
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._tv.setVolume(volume)
def media_previous_track(self):
"""Send rewind command."""
self._tv.sendKey('Previous')
def media_next_track(self):
"""Send fast forward command."""
self._tv.sendKey('Next')
@property
def media_title(self):
"""Title of current playing media."""
if self._watching_tv and self._channel_name:
return '{} - {}'.format(self._source, self._channel_name)
return self._source
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data and update device state."""
self._tv.update()
self._min_volume = self._tv.min_volume
self._max_volume = self._tv.max_volume
self._volume = self._tv.volume
self._muted = self._tv.muted
if self._tv.source_id:
self._source = self._tv.getSourceName(self._tv.source_id)
if self._tv.sources and not self._source_list:
for srcid in self._tv.sources:
srcname = self._tv.getSourceName(srcid)
self._source_list.append(srcname)
self._source_mapping[srcname] = srcid
if self._tv.on:
self._state = STATE_ON
else:
self._state = STATE_OFF
self._watching_tv = bool(self._tv.source_id == 'tv')
self._tv.getChannelId()
self._tv.getChannels()
if self._tv.channels and self._tv.channel_id in self._tv.channels:
self._channel_name = self._tv.channels[self._tv.channel_id]['name']
else:
self._channel_name = None
|
apache-2.0
|
Nevax07/FreedomOS
|
build/tools/img2sdat/rangelib.py
|
2
|
8701
|
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import heapq
import itertools
__all__ = ["RangeSet"]
class RangeSet(object):
"""A RangeSet represents a set of nonoverlapping ranges on the
integers (ie, a set of integers, but efficient when the set contains
lots of runs."""
def __init__(self, data=None):
self.monotonic = False
if isinstance(data, str):
self._parse_internal(data)
elif data:
assert len(data) % 2 == 0
self.data = tuple(self._remove_pairs(data))
self.monotonic = all(x < y for x, y in zip(self.data, self.data[1:]))
else:
self.data = ()
def __iter__(self):
for i in range(0, len(self.data), 2):
yield self.data[i:i+2]
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return self.data != other.data
def __nonzero__(self):
return bool(self.data)
def __str__(self):
if not self.data:
return "empty"
else:
return self.to_string()
def __repr__(self):
return '<RangeSet("' + self.to_string() + '")>'
@classmethod
def parse(cls, text):
"""Parse a text string consisting of a space-separated list of
blocks and ranges, eg "10-20 30 35-40". Ranges are interpreted to
include both their ends (so the above example represents 18
individual blocks. Returns a RangeSet object.
If the input has all its blocks in increasing order, then returned
RangeSet will have an extra attribute 'monotonic' that is set to
True. For example the input "10-20 30" is monotonic, but the input
"15-20 30 10-14" is not, even though they represent the same set
of blocks (and the two RangeSets will compare equal with ==).
"""
return cls(text)
def _parse_internal(self, text):
data = []
last = -1
monotonic = True
for p in text.split():
if "-" in p:
s, e = (int(x) for x in p.split("-"))
data.append(s)
data.append(e+1)
if last <= s <= e:
last = e
else:
monotonic = False
else:
s = int(p)
data.append(s)
data.append(s+1)
if last <= s:
last = s+1
else:
monotonic = False
data.sort()
self.data = tuple(self._remove_pairs(data))
self.monotonic = monotonic
@staticmethod
def _remove_pairs(source):
"""Remove consecutive duplicate items to simplify the result.
[1, 2, 2, 5, 5, 10] will become [1, 10]."""
last = None
for i in source:
if i == last:
last = None
else:
if last is not None:
yield last
last = i
if last is not None:
yield last
def to_string(self):
out = []
for i in range(0, len(self.data), 2):
s, e = self.data[i:i+2]
if e == s+1:
out.append(str(s))
else:
out.append(str(s) + "-" + str(e-1))
return " ".join(out)
def to_string_raw(self):
assert self.data
return str(len(self.data)) + "," + ",".join(str(i) for i in self.data)
def union(self, other):
"""Return a new RangeSet representing the union of this RangeSet
with the argument.
>>> RangeSet("10-19 30-34").union(RangeSet("18-29"))
<RangeSet("10-34")>
>>> RangeSet("10-19 30-34").union(RangeSet("22 32"))
<RangeSet("10-19 22 30-34")>
"""
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((+1, -1)))):
if (z == 0 and d == 1) or (z == 1 and d == -1):
out.append(p)
z += d
return RangeSet(data=out)
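# Note on the sweep used by union above and by intersect/subtract/overlaps
# below (added commentary): every boundary of each RangeSet is tagged +1 at a
# range start and -1 at a range end, the two tagged streams are merged in
# sorted order, and the counter z tracks how many input ranges cover the
# current position. A boundary is kept when z crosses the relevant threshold;
# for union that is the 0 <-> 1 transition, e.g. merging "10-19" and "15-24"
# keeps 10 (z: 0 -> 1) and 25 (z: 1 -> 0), giving "10-24".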
def intersect(self, other):
"""Return a new RangeSet representing the intersection of this
RangeSet with the argument.
>>> RangeSet("10-19 30-34").intersect(RangeSet("18-32"))
<RangeSet("18-19 30-32")>
>>> RangeSet("10-19 30-34").intersect(RangeSet("22-28"))
<RangeSet("")>
"""
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((+1, -1)))):
if (z == 1 and d == 1) or (z == 2 and d == -1):
out.append(p)
z += d
return RangeSet(data=out)
def subtract(self, other):
"""Return a new RangeSet representing subtracting the argument
from this RangeSet.
>>> RangeSet("10-19 30-34").subtract(RangeSet("18-32"))
<RangeSet("10-17 33-34")>
>>> RangeSet("10-19 30-34").subtract(RangeSet("22-28"))
<RangeSet("10-19 30-34")>
"""
out = []
z = 0
for p, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((-1, +1)))):
if (z == 0 and d == 1) or (z == 1 and d == -1):
out.append(p)
z += d
return RangeSet(data=out)
def overlaps(self, other):
"""Returns true if the argument has a nonempty overlap with this
RangeSet.
>>> RangeSet("10-19 30-34").overlaps(RangeSet("18-32"))
True
>>> RangeSet("10-19 30-34").overlaps(RangeSet("22-28"))
False
"""
# This is like intersect, but we can stop as soon as we discover the
# output is going to be nonempty.
z = 0
for _, d in heapq.merge(zip(self.data, itertools.cycle((+1, -1))),
zip(other.data, itertools.cycle((+1, -1)))):
if (z == 1 and d == 1) or (z == 2 and d == -1):
return True
z += d
return False
def size(self):
"""Returns the total size of the RangeSet (ie, how many integers
are in the set).
>>> RangeSet("10-19 30-34").size()
15
"""
total = 0
for i, p in enumerate(self.data):
if i % 2:
total += p
else:
total -= p
return total
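# Worked example (added): RangeSet("10-19 30-34").data is (10, 20, 30, 35), so
# the loop above computes -10 + 20 - 30 + 35 == 15, i.e. (20 - 10) + (35 - 30).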
def map_within(self, other):
"""'other' should be a subset of 'self'. Returns a RangeSet
representing what 'other' would get translated to if the integers
of 'self' were translated down to be contiguous starting at zero.
>>> RangeSet("0-9").map_within(RangeSet("3-4"))
<RangeSet("3-4")>
>>> RangeSet("10-19").map_within(RangeSet("13-14"))
<RangeSet("3-4")>
>>> RangeSet("10-19 30-39").map_within(RangeSet("17-19 30-32"))
<RangeSet("7-12")>
>>> RangeSet("10-19 30-39").map_within(RangeSet("12-13 17-19 30-32"))
<RangeSet("2-3 7-12")>
"""
out = []
offset = 0
start = None
for p, d in heapq.merge(zip(self.data, itertools.cycle((-5, +5))),
zip(other.data, itertools.cycle((-1, +1)))):
if d == -5:
start = p
elif d == +5:
offset += p-start
start = None
else:
out.append(offset + p - start)
return RangeSet(data=out)
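# Note on the -5/+5 markers above (added commentary): 'self' boundaries are
# tagged -5/+5 and 'other' boundaries -1/+1, so a -5 records where a 'self' run
# starts, a +5 adds that run's length to 'offset', and any 'other' boundary seen
# in between is emitted as offset + (p - start), its position in the flattened
# 'self'. The magnitudes also make a 'self' start sort before, and a 'self' end
# sort after, an 'other' boundary at the same position.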
def extend(self, n):
"""Extend the RangeSet by 'n' blocks.
The lower bound is guaranteed to be non-negative.
>>> RangeSet("0-9").extend(1)
<RangeSet("0-10")>
>>> RangeSet("10-19").extend(15)
<RangeSet("0-34")>
>>> RangeSet("10-19 30-39").extend(4)
<RangeSet("6-23 26-43")>
>>> RangeSet("10-19 30-39").extend(10)
<RangeSet("0-49")>
"""
out = self
for i in range(0, len(self.data), 2):
s, e = self.data[i:i+2]
s1 = max(0, s - n)
e1 = e + n
out = out.union(RangeSet(str(s1) + "-" + str(e1-1)))
return out
def first(self, n):
"""Return the RangeSet that contains at most the first 'n' integers.
>>> RangeSet("0-9").first(1)
<RangeSet("0")>
>>> RangeSet("10-19").first(5)
<RangeSet("10-14")>
>>> RangeSet("10-19").first(15)
<RangeSet("10-19")>
>>> RangeSet("10-19 30-39").first(3)
<RangeSet("10-12")>
>>> RangeSet("10-19 30-39").first(15)
<RangeSet("10-19 30-34")>
>>> RangeSet("10-19 30-39").first(30)
<RangeSet("10-19 30-39")>
>>> RangeSet("0-9").first(0)
<RangeSet("")>
"""
if self.size() <= n:
return self
out = []
for s, e in self:
if e - s >= n:
out += (s, s+n)
break
else:
out += (s, e)
n -= e - s
return RangeSet(data=out)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
apache-2.0
|
atl/thrifty-p2p
|
storetest.py
|
1
|
2112
|
#!/usr/bin/env python
# encoding: utf-8
"""
storetest.py
Created by Adam T. Lindsay on 2009-05-18.
The MIT License
Copyright (c) 2009 Adam T. Lindsay.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append('gen-py')
from locator.ttypes import Location
from storeserver import remote_call, parser, DEFAULTPORT, SERVICENAME
from location import find_matching_service, str2loc
usage = '''
python %prog
Looks for a storage node at PEER, either as specified, or
auto-discovered on the localhost starting from the default
port. Sends a bunch of keys for resolution.'''
parser.set_usage(usage)
parser.remove_option('--port')
KEYS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if __name__ == '__main__':
(options, args) = parser.parse_args()
if options.peer:
loc = str2loc(options.peer)
else:
loc = find_matching_service(Location('localhost', DEFAULTPORT), SERVICENAME) or sys.exit()
for key in KEYS:
value = remote_call('get', loc, key)
if value:
print value
else:
print "None received from expected %s" % remote_call('get_node', loc, key)
|
mit
|
OmkarPathak/Python-Programs
|
CompetitiveProgramming/HackerEarth/Bit_Manipulation/P03_MonkAndHisFriends.py
|
1
|
1442
|
# Monk has a very good friend, Puchi. As weird as his name, are the games he plays.
# One fine day, they decided to play a game to test how diverse their choices are. Both of them choose exactly
# one integer each. Monk chooses an integer M and Puchi chooses an integer P.
# The diversity of their choices is defined as the number of bits whose status is different in the binary
# representation of M and P, i.e., the count of bits that are either set in M and unset in P, or set in P and
# unset in M.
# Find the answer to their game.
#
# Input:
# First line contains T. T test cases follow.
# Each test case consists of 2 space-separated integers P and M.
#
# Output:
# Print the answer to each test case in a new line.
#
# Constraints:
# 1 ≤ T ≤ 10^4
# 0 ≤ M, P ≤ 10^16
#
# SAMPLE INPUT
# 4
# 1 4
# 3 3
# 5 1
# 8 7
#
# SAMPLE OUTPUT
# 2
# 0
# 1
# 4
#
# Explanation
# 1 (0001) and 4 (0100) differ in 2 bits.
# The two numbers are identical.
# 5 (0101) and 1(0001) differ in 1 bit.
# 8 (1000) and 7(0111) differ in 4 bits.
testCases = int(input())
for _ in range(testCases):
M, P = map(int, input().split())
c = M ^ P
result = 0
while(c > 0):
result += c % 2
c //= 2
print(result)
# Another Approach (faster):
# def count1s(n):
# count=0
# while n!=0:
# n = n&(n-1)
# count +=1
# return count
#
# for tc in range(int(input())):
# a,b = list(map(int,input().split()))
# print(count1s(a^b))
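# A further shortcut (added note, not part of the original solutions): Python
# can count the set bits of the XOR directly, which gives the same Hamming
# distance between M and P:
#
# print(bin(M ^ P).count('1'))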
|
gpl-3.0
|
jjkoletar/panda3d
|
contrib/src/sceneeditor/seForceGroup.py
|
8
|
5543
|
from pandac.PandaModules import *
from direct.showbase.DirectObject import DirectObject
from direct.showbase.PhysicsManagerGlobal import *
#Manakel 2/12/2005: replace from pandac import by from pandac.PandaModules import
from pandac.PandaModules import ForceNode
from direct.directnotify import DirectNotifyGlobal
import sys
class ForceGroup(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('ForceGroup')
id = 1
def __init__(self, name=None):
"""__init__(self)"""
if (name == None):
self.name = 'ForceGroup-%d' % ForceGroup.id
ForceGroup.id += 1
else:
self.name = name
self.node = ForceNode.ForceNode(self.name)
self.nodePath = NodePath(self.node)
self.fEnabled = 0
self.particleEffect = None
def cleanup(self):
self.node.clear()
self.nodePath.removeNode()
del self.nodePath
del self.node
del self.particleEffect
def enable(self):
"""
Convenience function to enable all forces in force group
"""
for i in range(self.node.getNumForces()):
f = self.node.getForce(i)
f.setActive(1)
self.fEnabled = 1
def disable(self):
"""
Convenience function to disable all forces in force group
"""
for i in range(self.node.getNumForces()):
f = self.node.getForce(i)
f.setActive(0)
self.fEnabled = 0
def isEnabled(self):
return self.fEnabled
def addForce(self, force):
"""addForce(self, force)"""
self.node.addForce(force)
if (self.particleEffect):
self.particleEffect.addForce(force)
def removeForce(self, force):
"""removeForce(self, force)"""
self.node.removeForce(force)
if (self.particleEffect != None):
self.particleEffect.removeForce(force)
# Get/set
def getName(self):
return self.name
def getNode(self):
return self.node
def getNodePath(self):
return self.nodePath
# Utility functions
def __getitem__(self, index):
numForces = self.node.getNumForces()
if ((index < 0) or (index >= numForces)):
raise IndexError
return self.node.getForce(index)
def __len__(self):
return self.node.getNumForces()
def asList(self):
l = []
for i in range(self.node.getNumForces()):
l.append(self.node.getForce(i))
return l
def printParams(self, file = sys.stdout, targ = 'self'):
i1=" "
i2=i1+i1
file.write(i2+'# Force parameters\n')
for i in range(self.node.getNumForces()):
f = self.node.getForce(i)
fname = 'force%d' % i
if isinstance(f, LinearForce):
amplitude = f.getAmplitude()
massDependent = f.getMassDependent()
if isinstance(f, LinearCylinderVortexForce):
file.write(i2+fname + ' = LinearCylinderVortexForce(%.4f, %.4f, %.4f, %.4f, %d)\n' % (f.getRadius(), f.getLength(), f.getCoef(), amplitude, massDependent))
elif isinstance(f, LinearDistanceForce):
radius = f.getRadius()
falloffType = f.getFalloffType()
ftype = 'FTONEOVERR'
if (falloffType == LinearDistanceForce.FTONEOVERR):
ftype = 'FTONEOVERR'
elif (falloffType == LinearDistanceForce.FTONEOVERRSQUARED):
ftype = 'FTONEOVERRSQUARED'
elif (falloffType == LinearDistanceForce.FTONEOVERRCUBED):
ftype = 'FTONEOVERRCUBED'
forceCenter = f.getForceCenter()
if isinstance(f, LinearSinkForce):
file.write(i2+fname + ' = LinearSinkForce(Point3(%.4f, %.4f, %.4f), LinearDistanceForce.%s, %.4f, %.4f, %d)\n' % (forceCenter[0], forceCenter[1], forceCenter[2], ftype, radius, amplitude, massDependent))
elif isinstance(f, LinearSourceForce):
file.write(i2+fname + ' = LinearSourceForce(Point3(%.4f, %.4f, %.4f), LinearDistanceForce.%s, %.4f, %.4f, %d)\n' % (forceCenter[0], forceCenter[1], forceCenter[2], ftype, radius, amplitude, massDependent))
elif isinstance(f, LinearFrictionForce):
file.write(i2+fname + ' = LinearFrictionForce(%.4f, %.4f, %d)\n' % (f.getCoef(), amplitude, massDependent))
elif isinstance(f, LinearJitterForce):
file.write(i2+fname + ' = LinearJitterForce(%.4f, %d)\n' % (amplitude, massDependent))
elif isinstance(f, LinearNoiseForce):
file.write(i2+fname + ' = LinearNoiseForce(%.4f, %d)\n' % (amplitude, massDependent))
elif isinstance(f, LinearVectorForce):
vec = f.getLocalVector()
file.write(i2+fname + ' = LinearVectorForce(Vec3(%.4f, %.4f, %.4f), %.4f, %d)\n' % (vec[0], vec[1], vec[2], amplitude, massDependent))
elif isinstance(f, AngularForce):
if isinstance(f, AngularVectorForce):
vec = f.getQuat()
file.write(i2+fname + ' = AngularVectorForce(Quat(%.4f, %.4f, %.4f, %.4f))\n' % (vec[0], vec[1], vec[2], vec[3]))
file.write(i2+fname + '.setActive(%d)\n' % f.getActive())
file.write(i2+targ + '.addForce(%s)\n' % fname)
|
bsd-3-clause
|
tictakk/servo
|
tests/wpt/web-platform-tests/css/tools/w3ctestlib/HTMLSerializer.py
|
80
|
10924
|
#!/usr/bin/python
# CSS Test Source Manipulation Library
# Initial code by fantasai, joint copyright 2010 W3C and Microsoft
# additions by [email protected] copyright 2013 Hewlett-Packard
# Licensed under BSD 3-Clause: <http://www.w3.org/Consortium/Legal/2008/03-bsd-license>
import lxml
from lxml import etree
import htmlentitydefs
import copy
class HTMLSerializer(object):
gXMLns = 'http://www.w3.org/XML/1998/namespace'
gHTMLns = 'http://www.w3.org/1999/xhtml'
gDefaultNamespaces = {'http://www.w3.org/XML/1998/namespace': 'xmlns',
'http://www.w3.org/2000/xmlns/': 'xmlns',
'http://www.w3.org/1999/xlink': 'xlink'}
gVoidElements = frozenset((
'base',
'command',
'event-source',
'link',
'meta',
'hr',
'br',
'img',
'embed',
'param',
'area',
'col',
'input',
'source'
))
gCDataElements = frozenset((
'style',
'script'
))
gInvisibleChars = frozenset(
# ASCII control chars
range(0x0, 0x9) + range(0xB, 0xD) + range(0xE, 0x20) +
# Other control chars
# fixed-width spaces, zero-width marks, bidi marks
range(0x2000, 0x2010) +
# LS, PS, bidi control codes
range(0x2028, 0x2030) +
# nbsp, mathsp, ideosp, WJ, interlinear
[0x00A0, 0x205F, 0x3000, 0x2060, 0xFFF9, 0xFFFA, 0xFFFB]
)
gXMLEscapes = frozenset(gInvisibleChars |
frozenset((ord('&'), ord('<'), ord('>'))))
gXMLEntityNames = {'"': 'quot', '&': 'amp', "'": 'apos', '<': 'lt', '>': 'gt'}
gDocTypes = {
'html': '<!DOCTYPE html>',
'html4':
'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">',
'html4-transitional':
'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">',
'html4-frameset':
'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">',
'svg11':
'<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1 Basic//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-basic.dtd">',
'svg11-tiny':
'<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1 Tiny//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11-tiny.dtd">',
'xhtml10':
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'xhtml10-transitional':
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
'xhtml10-frameset':
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">',
'xhtml11':
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">',
'xhtml-basic11':
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'
}
def __init__(self):
self._reset()
def _reset(self, xhtml = False):
self.mOutput = u''
self.mXHTML = xhtml
def _output(self, *args):
for arg in args:
self.mOutput += unicode(arg)
def _escape(self, text, escapeChars):
# This algorithm is O(MN) for M = len(text) and N = the number of escapable chars.
# But it doesn't modify the text when N is zero (common case) and
# N is expected to be small (usually 1 or 2) in most other cases.
escapable = set()
for char in text:
if ord(char) in escapeChars:
escapable.add(char)
for char in escapable:
if (self.mXHTML):
name = self.gXMLEntityNames.get(char)
else:
name = htmlentitydefs.codepoint2name.get(ord(char))
escape = u'&%s;' % name if name else u'&#x%X;' % ord(char)
text = text.replace(char, escape)
return text
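# Illustrative example (added): in HTML mode, _escape(u'1 < 2 & 3', gXMLEscapes)
# yields u'1 &lt; 2 &amp; 3' via htmlentitydefs, while a character with no named
# entity (e.g. U+2028) falls back to a numeric reference like u'&#x2028;'.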
def _escapeXML(self, text):
return self._escape(text, self.gXMLEscapes)
def _escapeInvisible(self, text):
return self._escape(text, self.gInvisibleChars)
def _serializeElement(self, element, namespacePrefixes):
qName = etree.QName(element)
attrs = element.attrib.items() # in tree order
if (not namespacePrefixes):
namespacePrefixes = self.gDefaultNamespaces
if (self.mXHTML):
namespacePrefixes = copy.copy(namespacePrefixes)
for attr, value in attrs:
attrQName = etree.QName(attr)
if (self.gXMLns == attrQName.namespace):
namespacePrefixes[value] = attrQName.localname
elif ('xmlns' == attrQName.localname):
namespacePrefixes[value] = ''
if (self.mXHTML and qName.namespace and namespacePrefixes[qName.namespace]):
self._output('<', namespacePrefixes[qName.namespace], ':', qName.localname)
else:
self._output('<', qName.localname)
for attr, value in attrs:
attrQName = etree.QName(attr)
if ((attrQName.namespace == self.gXMLns) and ('lang' == attrQName.localname)):
if (self.mXHTML):
attr = 'xml:lang'
else:
attr = 'lang'
elif (attrQName.namespace and namespacePrefixes[attrQName.namespace]):
attr = namespacePrefixes[attrQName.namespace] + ':' + attrQName.localname
else:
attr = attrQName.localname
self._output(' ', attr, '=')
value = value.replace('&', '&amp;')
if (self.mXHTML):
value = value.replace('<', '&lt;')
if (('"' in value) and ("'" not in value)):
self._output("'", self._escapeInvisible(value), "'")
else:
self._output('"', self._escapeInvisible(value.replace('"', '"')), '"')
if ((qName.namespace == self.gHTMLns) and (qName.localname in self.gVoidElements)):
if (self.mXHTML):
self._output(' />')
else:
self._output('>')
else:
self._output('>')
if (None != element.text):
if ((qName.namespace == self.gHTMLns) and (qName.localname in self.gCDataElements)):
if (self.mXHTML):
self._output(self._escapeXML(element.text)) # or self._output('<![CDATA[', element.text, ']]>')
else:
self._output(element.text)
else:
self._output(self._escapeXML(element.text))
for child in list(element):
self._serializeNode(child, namespacePrefixes)
self._output('</', qName.localname, '>')
if (None != element.tail):
self._output(self._escapeXML(element.tail))
def _serializeEntity(self, entity):
self._output(entity.text)
if (None != entity.tail):
self._output(self._escapeXML(entity.tail))
def _serializePI(self, pi):
if (self.mXHTML):
self._output('<?', pi.target, ' ', pi.text, '?>')
else:
raise Exception("Processing Instructions can't be converted to HTML")
if (None != pi.tail):
self._output(self._escapeXML(pi.tail))
def _serializeComment(self, comment):
self._output('<!--', comment.text, '-->') # XXX escape comment?
if (None != comment.tail):
self._output(self._escapeXML(comment.tail))
def _serializeNode(self, node, namespacePrefixes = None):
if (isinstance(node, etree._Entity)):
self._serializeEntity(node)
elif (isinstance(node, etree._ProcessingInstruction)):
self._serializePI(node)
elif (isinstance(node, etree._Comment)):
self._serializeComment(node)
else:
self._serializeElement(node, namespacePrefixes)
def _serializeTree(self, tree):
root = tree.getroot()
preceding = [node for node in root.itersiblings(preceding = True)]
preceding.reverse()
for node in preceding:
self._serializeNode(node)
self._serializeNode(root)
for node in root.itersiblings():
self._serializeNode(node)
def _serializeDoctype(self, tree, doctype, default):
if (doctype):
self._output(self.gDocTypes[doctype], '\n')
else:
if (hasattr(tree, 'docinfo') and tree.docinfo and tree.docinfo.doctype):
doctypeSearch = tree.docinfo.doctype.lower()
for doctype in self.gDocTypes:
if (self.gDocTypes[doctype].lower() == doctypeSearch):
break
else:
doctype = None
if (self.mXHTML):
if ('html' == doctype):
doctype = 'xhtml10'
elif ('html4' == doctype):
doctype = 'xhtml10'
elif ('html4-transitional' == doctype):
doctype = 'xhtml10-transitional'
elif ('html4-frameset' == doctype):
doctype = 'xhtml10-frameset'
else:
if ('xhtml10' == doctype):
doctype = 'html4'
elif ('xhtml10-transitional' == doctype):
doctype = 'html4-transitional'
elif ('xhtml10-frameset' == doctype):
doctype = 'html4-frameset'
elif ('xhtml11' == doctype):
doctype = 'html4'
if (doctype):
self._output(self.gDocTypes[doctype], '\n')
else:
self._output(tree.docinfo.doctype, '\n')
else:
self._output(self.gDocTypes[default], '\n')
def serializeHTML(self, tree, doctype = None):
self._reset()
self._serializeDoctype(tree, doctype, 'html')
self._serializeTree(tree)
return self.mOutput
def serializeXHTML(self, tree, doctype = None):
self._reset(True)
# XXX '<?xml ...' ??
self._serializeDoctype(tree, doctype, 'xhtml11')
self._serializeTree(tree)
return self.mOutput
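# Usage sketch (added note; the file name is made up): the serializer operates
# on an lxml tree, e.g.
#
#   tree = etree.parse('testcase.xht')
#   html = HTMLSerializer().serializeHTML(tree)
#   xhtml = HTMLSerializer().serializeXHTML(tree)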
|
mpl-2.0
|
davidak/PyZufall
|
pyzufall/helfer.py
|
1
|
4339
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyzufall.helfer
---------------
Provides various helper functions.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import os, sys
import codecs
import random as r
from datetime import datetime, date
# Regex Pattern
re_wort = r'^[A-Za-zäÄöÖüÜß-]+$'
re_worte = r'^[A-Za-zäÄöÖüÜß -]+$'
re_liste = r'^[A-Za-zäÄöÖüÜß -]+,[A-Za-zäÄöÖüÜß, -]+'
re_datum = r'^(0[1-9]|[12][0-9]|3[01]).(0[1-9]|1[0-2]).(19|20)[0-9]{2}$'
re_email = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$'
re_satz = r'^[A-ZÄÖÜ]{1}[a-zA-Z0-9éäÄöÖüÜß ,-/.?"()]+[.!?]{1}$'
re_frage = r'^[A-ZÄÖÜ]{1}[a-zA-Z0-9éäÄöÖüÜß ,-/"()]+[?]{1}$'
def lese(dateiname):
"""
Reads the text file with the given name from data/ line by line and returns it as a list.
Example:
>>> liste = lese('baeume.txt')
`<http://stackoverflow.com/questions/10174211/make-an-always-relative-to-current-module-file-path>`_
:param dateiname: file name including extension, e.g. *vornamen.txt*
:type dateiname: string
:rtype: list
.. only:: doctest
# check that liste is a list
>>> assert isinstance(liste, list)
"""
dateipfad = os.path.join(os.path.dirname(__file__), 'data/' + dateiname)
return codecs.open(dateipfad, 'r', 'utf-8').read().splitlines()
def chance(wahrscheinlichkeit, wert):
"""
Returns the given value with the chosen probability.
.. versionadded:: 0.11
:param wahrscheinlichkeit: int between 1 and 100
:param wert: string
"""
if r.randint(1, 100) <= wahrscheinlichkeit:
return wert
else:
return ''
def erste_gross(s):
"""
Makes the first letter uppercase.
Examples:
>>> a = erste_gross('das ist ein Beispiel?')
>>> print(a)
Das ist ein Beispiel?
>>> b = erste_gross('über Stock und Stein.')
>>> print(b)
Über Stock und Stein.
>>> c = erste_gross('älter als das Internet!')
>>> print(c)
Älter als das Internet!
"""
return s[0].upper() + s[1:]
def str_add(wort, string):
"""
Appends a string to the end of a word without creating doubled letters.
Examples:
>>> a = str_add('feige', 'er')
>>> print(a)
feiger
>>> b = str_add('feige', 'e')
>>> print(b)
feige
>>> c = str_add('blöd', 'e')
>>> print(c)
blöde
.. only:: doctest
>>> a = str_add('unflexibel', 'e')
>>> print(a)
unflexible
>>> b = str_add('unflexibel', 'er')
>>> print(b)
unflexibler
.. versionadded:: 0.11
"""
# if the last letter of the word equals the first letter of the string
if wort[-1] == string[0]:
# return the word plus everything after the first letter of the string
s = wort + string[1:]
else:
s = wort + string
# fixes error: unflexibele -> unflexible
if s[-3:] == 'ele':
s = s[:-3] + 'le'
# fixes error: unflexibeler -> unflexibler
if s[-4:] == 'eler':
s = s[:-4] + 'ler'
return s
def aufzaehlung(liste):
"""
Builds a grammatically correct enumeration from a list.
Example:
>>> a = ['lesen', 'reiten', 'Freunde treffen']
>>> s = aufzaehlung(a)
>>> print(s)
lesen, reiten und Freunde treffen
>>> b = ['Überwachen', 'Strafen']
>>> s = aufzaehlung(b)
>>> print(s)
Überwachen und Strafen
>>> c = ['schlafen']
>>> s = aufzaehlung(c)
>>> print(s)
schlafen
:param liste: A list of strings.
:type liste: list
:rtype: string
.. versionadded:: 0.12
"""
return "{}{}".format(', '.join(liste[:-2]) + ', ' if len(liste) > 2 else '', ' und '.join(liste[-2:]))
def alter(geburtsdatum):
"""
Calculates the age in years from the date of birth.
:rtype: int
.. only:: doctest
>>> s = alter("27.10.1988")
>>> assert s > 24
.. versionadded:: 0.12
"""
_heute = date.today()
_geburtstag = datetime.strptime(geburtsdatum, "%d.%m.%Y").date()
_alter = int((_heute - _geburtstag).days / 365.2425)
return _alter
def uml(s):
"""
Replaces umlauts with the corresponding two-letter sequences.
Example:
>>> a = uml('Käse')
>>> print(a)
Kaese
>>> b = uml('Brötchen')
>>> print(b)
Broetchen
>>> c = uml('Gefühl')
>>> print(c)
Gefuehl
.. versionadded:: 0.13
"""
s = s.replace('ä', 'ae')
s = s.replace('ü', 'ue')
s = s.replace('ö', 'oe')
s = s.replace('ß', 'ss')
return s
|
gpl-3.0
|
ehogan/iris
|
lib/iris/tests/unit/analysis/cartography/test_project.py
|
5
|
5267
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :func:`iris.analysis.cartography.project`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import cartopy.crs as ccrs
import numpy as np
import iris.coord_systems
import iris.coords
import iris.cube
import iris.tests
import iris.tests.stock
from iris.analysis.cartography import project
ROBINSON = ccrs.Robinson()
def low_res_4d():
cube = iris.tests.stock.realistic_4d_no_derived()
cube = cube[0:2, 0:3, ::10, ::10]
cube.remove_coord('surface_altitude')
return cube
class TestAll(tests.IrisTest):
def setUp(self):
cs = iris.coord_systems.GeogCS(654321)
self.cube = iris.cube.Cube(np.zeros(25).reshape(5, 5))
self.cube.add_dim_coord(
iris.coords.DimCoord(np.arange(5), standard_name="latitude",
units='degrees', coord_system=cs), 0)
self.cube.add_dim_coord(
iris.coords.DimCoord(np.arange(5), standard_name="longitude",
units='degrees', coord_system=cs), 1)
self.tcs = iris.coord_systems.GeogCS(600000)
def test_is_iris_coord_system(self):
res, _ = project(self.cube, self.tcs)
self.assertEqual(res.coord('projection_y_coordinate').coord_system,
self.tcs)
self.assertEqual(res.coord('projection_x_coordinate').coord_system,
self.tcs)
self.assertIsNot(res.coord('projection_y_coordinate').coord_system,
self.tcs)
self.assertIsNot(res.coord('projection_x_coordinate').coord_system,
self.tcs)
def test_bad_resolution_negative(self):
cube = low_res_4d()
with self.assertRaises(ValueError):
project(cube, ROBINSON, nx=-200, ny=200)
def test_bad_resolution_non_numeric(self):
cube = low_res_4d()
with self.assertRaises(ValueError):
project(cube, ROBINSON, nx=200, ny='abc')
def test_missing_lat(self):
cube = low_res_4d()
cube.remove_coord('grid_latitude')
with self.assertRaises(ValueError):
project(cube, ROBINSON)
def test_missing_lon(self):
cube = low_res_4d()
cube.remove_coord('grid_longitude')
with self.assertRaises(ValueError):
project(cube, ROBINSON)
def test_missing_latlon(self):
cube = low_res_4d()
cube.remove_coord('grid_longitude')
cube.remove_coord('grid_latitude')
with self.assertRaises(ValueError):
project(cube, ROBINSON)
def test_default_resolution(self):
cube = low_res_4d()
new_cube, extent = project(cube, ROBINSON)
self.assertEqual(new_cube.shape, cube.shape)
def test_explicit_resolution(self):
cube = low_res_4d()
nx, ny = 5, 4
new_cube, extent = project(cube, ROBINSON, nx=nx, ny=ny)
self.assertEqual(new_cube.shape, cube.shape[:2] + (ny, nx))
def test_explicit_resolution_single_point(self):
cube = low_res_4d()
nx, ny = 1, 1
new_cube, extent = project(cube, ROBINSON, nx=nx, ny=ny)
self.assertEqual(new_cube.shape, cube.shape[:2] + (ny, nx))
def test_mismatched_coord_systems(self):
cube = low_res_4d()
cube.coord('grid_longitude').coord_system = None
with self.assertRaises(ValueError):
project(cube, ROBINSON)
def test_extent(self):
cube = low_res_4d()
_, extent = project(cube, ROBINSON)
self.assertEqual(extent, [-17005833.33052523, 17005833.33052523,
-8625155.12857459, 8625155.12857459])
def test_cube(self):
cube = low_res_4d()
new_cube, _ = project(cube, ROBINSON)
self.assertCMLApproxData(new_cube)
def test_no_coord_system(self):
cube = low_res_4d()
cube.coord('grid_longitude').coord_system = None
cube.coord('grid_latitude').coord_system = None
with iris.tests.mock.patch('warnings.warn') as warn:
_, _ = project(cube, ROBINSON)
warn.assert_called_once_with('Coordinate system of latitude and '
'longitude coordinates is not specified. '
'Assuming WGS84 Geodetic.')
if __name__ == '__main__':
tests.main()
|
lgpl-3.0
|
lucafavatella/intellij-community
|
python/lib/Lib/distutils/command/install_scripts.py
|
80
|
2183
|
"""distutils.command.install_scripts
Implements the Distutils 'install_scripts' command, for installing
Python scripts."""
# contributed by Bastian Kleineidam
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install_scripts.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.core import Command
from distutils import log
from stat import ST_MODE
class install_scripts (Command):
description = "install scripts (Python or otherwise)"
user_options = [
('install-dir=', 'd', "directory to install scripts to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'skip-build']
def initialize_options (self):
self.install_dir = None
self.force = 0
self.build_dir = None
self.skip_build = None
def finalize_options (self):
self.set_undefined_options('build', ('build_scripts', 'build_dir'))
self.set_undefined_options('install',
('install_scripts', 'install_dir'),
('force', 'force'),
('skip_build', 'skip_build'),
)
def run (self):
if not self.skip_build:
self.run_command('build_scripts')
self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
if hasattr(os, 'chmod'):
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
log.info("changing mode of %s", file)
else:
mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
log.info("changing mode of %s to %o", file, mode)
os.chmod(file, mode)
def get_inputs (self):
return self.distribution.scripts or []
def get_outputs(self):
return self.outfiles or []
# class install_scripts
|
apache-2.0
|
deepchem/deepchem
|
deepchem/models/torch_models/torch_model.py
|
2
|
46355
|
import numpy as np
import torch
try:
import torch.utils.tensorboard
_has_tensorboard = True
except ImportError:
_has_tensorboard = False
import time
import logging
import os
try:
from collections.abc import Sequence as SequenceCollection
except ImportError:
from collections import Sequence as SequenceCollection
from deepchem.data import Dataset, NumpyDataset
from deepchem.metrics import Metric
from deepchem.models.losses import Loss
from deepchem.models.models import Model
from deepchem.models.optimizers import Adam, Optimizer, LearningRateSchedule
from deepchem.trans import Transformer, undo_transforms
from deepchem.utils.evaluate import GeneratorEvaluator
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from deepchem.utils.typing import ArrayLike, LossFn, OneOrMany
from deepchem.models.wandblogger import WandbLogger
try:
import wandb
wandb.ensure_configured()
if wandb.api.api_key is None:
_has_wandb = False
wandb.termwarn(
"W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable."
)
else:
_has_wandb = True
except (ImportError, AttributeError):
_has_wandb = False
logger = logging.getLogger(__name__)
class TorchModel(Model):
"""This is a DeepChem model implemented by a PyTorch model.
Here is a simple example of code that uses TorchModel to train
a PyTorch model on a DeepChem dataset.
>> pytorch_model = torch.nn.Sequential(
>> torch.nn.Linear(100, 1000),
>> torch.nn.Tanh(),
>> torch.nn.Linear(1000, 1))
>> model = TorchModel(pytorch_model, loss=dc.models.losses.L2Loss())
>> model.fit(dataset)
The loss function for a model can be defined in two different
ways. For models that have only a single output and use a
standard loss function, you can simply provide a
dc.models.losses.Loss object. This defines the loss for each
sample or sample/task pair. The result is automatically
multiplied by the weights and averaged over the batch.
For more complicated cases, you can instead provide a function
that directly computes the total loss. It must be of the form
f(outputs, labels, weights), taking the list of outputs from
the model, the expected values, and any weight matrices. It
should return a scalar equal to the value of the loss function
for the batch. No additional processing is done to the
result; it is up to you to do any weighting, averaging, adding
of penalty terms, etc.
You can optionally provide an output_types argument, which
describes how to interpret the model's outputs. This should
be a list of strings, one for each output. You can use an
arbitrary output_type for an output, but some output_types are
special and will undergo extra processing:
- 'prediction': This is a normal output, and will be returned by predict().
If output types are not specified, all outputs are assumed
to be of this type.
- 'loss': This output will be used in place of the normal
outputs for computing the loss function. For example,
models that output probability distributions usually do it
by computing unbounded numbers (the logits), then passing
them through a softmax function to turn them into
probabilities. When computing the cross entropy, it is more
numerically stable to use the logits directly rather than
the probabilities. You can do this by having the model
produce both probabilities and logits as outputs, then
specifying output_types=['prediction', 'loss']. When
predict() is called, only the first output (the
probabilities) will be returned. But during training, it is
the second output (the logits) that will be passed to the
loss function.
- 'variance': This output is used for estimating the
uncertainty in another output. To create a model that can
estimate uncertainty, there must be the same number of
'prediction' and 'variance' outputs. Each variance output
must have the same shape as the corresponding prediction
output, and each element is an estimate of the variance in
the corresponding prediction. Also be aware that if a model
supports uncertainty, it MUST use dropout on every layer,
and dropout must be enabled during uncertainty prediction.
Otherwise, the uncertainties it computes will be inaccurate.
- other: Arbitrary output_types can be used to extract outputs
produced by the model, but will have no additional
processing performed.
"""
def __init__(self,
model: torch.nn.Module,
loss: Union[Loss, LossFn],
output_types: Optional[List[str]] = None,
batch_size: int = 100,
model_dir: Optional[str] = None,
learning_rate: Union[float, LearningRateSchedule] = 0.001,
optimizer: Optional[Optimizer] = None,
tensorboard: bool = False,
wandb: bool = False,
log_frequency: int = 100,
device: Optional[torch.device] = None,
regularization_loss: Optional[Callable] = None,
wandb_logger: Optional[WandbLogger] = None,
**kwargs) -> None:
"""Create a new TorchModel.
Parameters
----------
model: torch.nn.Module
the PyTorch model implementing the calculation
loss: dc.models.losses.Loss or function
a Loss or function defining how to compute the training loss for each
batch, as described above
output_types: list of strings, optional (default None)
the type of each output from the model, as described above
batch_size: int, optional (default 100)
default batch size for training and evaluating
model_dir: str, optional (default None)
the directory on disk where the model will be stored. If this is None,
a temporary directory is created.
learning_rate: float or LearningRateSchedule, optional (default 0.001)
the learning rate to use for fitting. If optimizer is specified, this is
ignored.
optimizer: Optimizer, optional (default None)
the optimizer to use for fitting. If this is specified, learning_rate is
ignored.
tensorboard: bool, optional (default False)
whether to log progress to TensorBoard during training
wandb: bool, optional (default False)
whether to log progress to Weights & Biases during training
log_frequency: int, optional (default 100)
The frequency at which to log data. Data is logged using
`logging` by default. If `tensorboard` is set, data is also
logged to TensorBoard. If `wandb` is set, data is also logged
to Weights & Biases. Logging happens at global steps. Roughly,
a global step corresponds to one batch of training. If you'd
like a printout every 10 batch steps, you'd set
`log_frequency=10` for example.
device: torch.device, optional (default None)
the device on which to run computations. If None, a device is
chosen automatically.
regularization_loss: Callable, optional
a function that takes no arguments, and returns an extra contribution to add
to the loss function
wandb_logger: WandbLogger
the Weights & Biases logger object used to log data and metrics
"""
super(TorchModel, self).__init__(model=model, model_dir=model_dir, **kwargs)
if isinstance(loss, Loss):
self._loss_fn: LossFn = _StandardLoss(self, loss)
else:
self._loss_fn = loss
self.batch_size = batch_size
if optimizer is None:
self.optimizer: Optimizer = Adam(learning_rate=learning_rate)
else:
self.optimizer = optimizer
self.tensorboard = tensorboard
self.regularization_loss = regularization_loss
# Select a device.
if device is None:
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
self.device = device
self.model = model.to(device)
# W&B logging
if wandb:
logger.warning(
"`wandb` argument is deprecated. Please use `wandb_logger` instead. "
"This argument will be removed in a future release of DeepChem.")
if wandb and not _has_wandb:
logger.warning(
"You set wandb to True but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
self.wandb = wandb and _has_wandb
self.wandb_logger = wandb_logger
# If `wandb=True` and no logger is provided, initialize default logger
if self.wandb and (self.wandb_logger is None):
self.wandb_logger = WandbLogger()
# Setup and initialize W&B logging
if (self.wandb_logger is not None) and (not self.wandb_logger.initialized):
self.wandb_logger.setup()
# Update config with TorchModel params
wandb_logger_config = dict(
loss=loss,
output_types=output_types,
batch_size=batch_size,
model_dir=model_dir,
learning_rate=learning_rate,
optimizer=optimizer,
tensorboard=tensorboard,
log_frequency=log_frequency,
regularization_loss=regularization_loss)
wandb_logger_config.update(**kwargs)
if self.wandb_logger is not None:
self.wandb_logger.update_config(wandb_logger_config)
self.log_frequency = log_frequency
if self.tensorboard and not _has_tensorboard:
raise ImportError("This class requires tensorboard to be installed.")
if self.tensorboard:
self._summary_writer = torch.utils.tensorboard.SummaryWriter(
self.model_dir)
if output_types is None:
self._prediction_outputs = None
self._loss_outputs = None
self._variance_outputs = None
self._other_outputs = None
else:
self._prediction_outputs = []
self._loss_outputs = []
self._variance_outputs = []
self._other_outputs = []
for i, type in enumerate(output_types):
if type == 'prediction':
self._prediction_outputs.append(i)
elif type == 'loss':
self._loss_outputs.append(i)
elif type == 'variance':
self._variance_outputs.append(i)
else:
self._other_outputs.append(i)
if len(self._loss_outputs) == 0:
self._loss_outputs = self._prediction_outputs
self._built = False
self._output_functions: Dict[Any, Any] = {}
self._optimizer_for_vars: Dict[Any, Any] = {}
def _ensure_built(self) -> None:
"""The first time this is called, create internal data structures."""
if self._built:
return
self._built = True
self._global_step = 0
self._pytorch_optimizer = self.optimizer._create_pytorch_optimizer(
self.model.parameters())
if isinstance(self.optimizer.learning_rate, LearningRateSchedule):
self._lr_schedule = self.optimizer.learning_rate._create_pytorch_schedule(
self._pytorch_optimizer)
else:
self._lr_schedule = None
def fit(self,
dataset: Dataset,
nb_epoch: int = 10,
max_checkpoints_to_keep: int = 5,
checkpoint_interval: int = 1000,
deterministic: bool = False,
restore: bool = False,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
"""Train this model on a dataset.
Parameters
----------
dataset: Dataset
the Dataset to train on
nb_epoch: int
the number of epochs to train for
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
Set this to 0 to disable automatic checkpointing.
deterministic: bool
if True, the samples are processed in order. If False, a different random
order is used for each epoch.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
all_losses: Optional[List[float]], optional (default None)
If specified, all logged losses are appended into this list. Note that
you can call `fit()` repeatedly with the same list and losses will
continue to be appended.
Returns
-------
The average loss over the most recent checkpoint interval
"""
return self.fit_generator(
self.default_generator(
dataset, epochs=nb_epoch,
deterministic=deterministic), max_checkpoints_to_keep,
checkpoint_interval, restore, variables, loss, callbacks, all_losses)
def fit_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
max_checkpoints_to_keep: int = 5,
checkpoint_interval: int = 1000,
restore: bool = False,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
all_losses: Optional[List[float]] = None) -> float:
"""Train this model on data from a generator.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
checkpoint_interval: int
the frequency at which to write checkpoints, measured in training steps.
Set this to 0 to disable automatic checkpointing.
restore: bool
if True, restore the model from the most recent checkpoint and continue training
from there. If False, retrain the model from scratch.
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
all_losses: Optional[List[float]], optional (default None)
If specified, all logged losses are appended into this list. Note that
you can call `fit()` repeatedly with the same list and losses will
continue to be appended.
Returns
-------
The average loss over the most recent checkpoint interval
"""
if not isinstance(callbacks, SequenceCollection):
callbacks = [callbacks]
self._ensure_built()
self.model.train()
avg_loss = 0.0
last_avg_loss = 0.0
averaged_batches = 0
if loss is None:
loss = self._loss_fn
if variables is None:
optimizer = self._pytorch_optimizer
lr_schedule = self._lr_schedule
else:
var_key = tuple(variables)
if var_key in self._optimizer_for_vars:
optimizer, lr_schedule = self._optimizer_for_vars[var_key]
else:
optimizer = self.optimizer._create_pytorch_optimizer(variables)
if isinstance(self.optimizer.learning_rate, LearningRateSchedule):
lr_schedule = self.optimizer.learning_rate._create_pytorch_schedule(
optimizer)
else:
lr_schedule = None
self._optimizer_for_vars[var_key] = (optimizer, lr_schedule)
time1 = time.time()
# Main training loop.
for batch in generator:
if restore:
self.restore()
restore = False
inputs: OneOrMany[torch.Tensor]
inputs, labels, weights = self._prepare_batch(batch)
# Execute the loss function, accumulating the gradients.
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
optimizer.zero_grad()
outputs = self.model(inputs)
if isinstance(outputs, torch.Tensor):
outputs = [outputs]
if self._loss_outputs is not None:
outputs = [outputs[i] for i in self._loss_outputs]
batch_loss = loss(outputs, labels, weights)
batch_loss.backward()
optimizer.step()
if lr_schedule is not None:
lr_schedule.step()
self._global_step += 1
current_step = self._global_step
avg_loss += batch_loss
# Report progress and write checkpoints.
averaged_batches += 1
should_log = (current_step % self.log_frequency == 0)
if should_log:
avg_loss = float(avg_loss) / averaged_batches
logger.info(
'Ending global_step %d: Average loss %g' % (current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
# Capture the last avg_loss in case of return since we're resetting to 0 now
last_avg_loss = avg_loss
avg_loss = 0.0
averaged_batches = 0
if checkpoint_interval > 0 and current_step % checkpoint_interval == checkpoint_interval - 1:
self.save_checkpoint(max_checkpoints_to_keep)
for c in callbacks:
c(self, current_step)
if self.tensorboard and should_log:
self._log_scalar_to_tensorboard('loss', batch_loss, current_step)
if (self.wandb_logger is not None) and should_log:
all_data = dict({'train/loss': batch_loss})
self.wandb_logger.log_data(all_data, step=current_step)
# Close WandbLogger
if self.wandb_logger is not None:
self.wandb_logger.finish()
# Report final results.
if averaged_batches > 0:
avg_loss = float(avg_loss) / averaged_batches
logger.info(
'Ending global_step %d: Average loss %g' % (current_step, avg_loss))
if all_losses is not None:
all_losses.append(avg_loss)
last_avg_loss = avg_loss
if checkpoint_interval > 0:
self.save_checkpoint(max_checkpoints_to_keep)
time2 = time.time()
logger.info("TIMING: model fitting took %0.3f s" % (time2 - time1))
return last_avg_loss
def fit_on_batch(self,
X: Sequence,
y: Sequence,
w: Sequence,
variables: Optional[List[torch.nn.Parameter]] = None,
loss: Optional[LossFn] = None,
callbacks: Union[Callable, List[Callable]] = [],
checkpoint: bool = True,
max_checkpoints_to_keep: int = 5) -> float:
"""Perform a single step of training.
Parameters
----------
X: ndarray
the inputs for the batch
y: ndarray
the labels for the batch
w: ndarray
the weights for the batch
variables: list of torch.nn.Parameter
the variables to train. If None (the default), all trainable variables in
the model are used.
loss: function
a function of the form f(outputs, labels, weights) that computes the loss
for each batch. If None (the default), the model's standard loss function
is used.
callbacks: function or list of functions
one or more functions of the form f(model, step) that will be invoked after
every step. This can be used to perform validation, logging, etc.
checkpoint: bool
if true, save a checkpoint after performing the training step
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
Returns
-------
the loss on the batch
"""
self._ensure_built()
dataset = NumpyDataset(X, y, w)
return self.fit(
dataset,
nb_epoch=1,
max_checkpoints_to_keep=max_checkpoints_to_keep,
checkpoint_interval=self._global_step + 2 if checkpoint else 0,
variables=variables,
loss=loss,
callbacks=callbacks)
def _predict(
self, generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer], uncertainty: bool,
other_output_types: Optional[OneOrMany[str]]) -> OneOrMany[np.ndarray]:
"""
Predict outputs for data provided by a generator.
This is the private implementation of prediction. Do not
call it directly. Instead call one of the public prediction
methods.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
uncertainty: bool
specifies whether this is being called as part of estimating uncertainty.
If True, it sets the training flag so that dropout will be enabled, and
returns the values of the uncertainty outputs.
other_output_types: list, optional
Provides a list of other output_types (strings) to predict from model.
Returns:
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
results: Optional[List[List[np.ndarray]]] = None
variances: Optional[List[List[np.ndarray]]] = None
if uncertainty and (other_output_types is not None):
raise ValueError(
'This model cannot compute uncertainties and other output types simultaneously. Please invoke one at a time.'
)
if uncertainty:
if self._variance_outputs is None or len(self._variance_outputs) == 0:
raise ValueError('This model cannot compute uncertainties')
if len(self._variance_outputs) != len(self._prediction_outputs):
raise ValueError(
'The number of variances must exactly match the number of outputs')
if other_output_types:
if self._other_outputs is None or len(self._other_outputs) == 0:
raise ValueError(
'This model cannot compute other outputs since no other output_types were specified.'
)
self._ensure_built()
self.model.eval()
for batch in generator:
inputs, labels, weights = batch
inputs, _, _ = self._prepare_batch((inputs, None, None))
# Invoke the model.
if isinstance(inputs, list) and len(inputs) == 1:
inputs = inputs[0]
output_values = self.model(inputs)
if isinstance(output_values, torch.Tensor):
output_values = [output_values]
output_values = [t.detach().cpu().numpy() for t in output_values]
# Apply transformers and record results.
if uncertainty:
var = [output_values[i] for i in self._variance_outputs]
if variances is None:
variances = [var]
else:
for i, t in enumerate(var):
variances[i].append(t)
access_values = []
if other_output_types:
access_values += self._other_outputs
elif self._prediction_outputs is not None:
access_values += self._prediction_outputs
if len(access_values) > 0:
output_values = [output_values[i] for i in access_values]
if len(transformers) > 0:
if len(output_values) > 1:
raise ValueError(
"predict() does not support Transformers for models with multiple outputs."
)
elif len(output_values) == 1:
output_values = [undo_transforms(output_values[0], transformers)]
if results is None:
results = [[] for i in range(len(output_values))]
for i, t in enumerate(output_values):
results[i].append(t)
# Concatenate arrays to create the final results.
final_results = []
final_variances = []
if results is not None:
for r in results:
final_results.append(np.concatenate(r, axis=0))
if uncertainty and variances is not None:
for v in variances:
final_variances.append(np.concatenate(v, axis=0))
return zip(final_results, final_variances)
if len(final_results) == 1:
return final_results[0]
else:
return final_results
def predict_on_generator(
self,
generator: Iterable[Tuple[Any, Any, Any]],
transformers: List[Transformer] = [],
output_types: Optional[OneOrMany[str]] = None) -> OneOrMany[np.ndarray]:
"""
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
output_types: String or list of Strings
If specified, all outputs of this type will be retrieved
from the model. If output_types is specified, outputs must
be None.
Returns:
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
return self._predict(generator, transformers, False, output_types)
def predict_on_batch(self, X: ArrayLike, transformers: List[Transformer] = []
) -> OneOrMany[np.ndarray]:
"""Generates predictions for input samples, processing samples in a batch.
Parameters
----------
X: ndarray
the input data, as a Numpy array.
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
Returns
-------
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
dataset = NumpyDataset(X=X, y=None)
return self.predict(dataset, transformers)
def predict_uncertainty_on_batch(self, X: Sequence, masks: int = 50
) -> OneOrMany[Tuple[np.ndarray, np.ndarray]]:
"""
Predict the model's outputs, along with the uncertainty in each one.
The uncertainty is computed as described in https://arxiv.org/abs/1703.04977.
It involves repeating the prediction many times with different dropout masks.
The prediction is computed as the average over all the predictions. The
uncertainty includes both the variation among the predicted values (epistemic
uncertainty) and the model's own estimates for how well it fits the data
(aleatoric uncertainty). Not all models support uncertainty prediction.
Parameters
----------
X: ndarray
the input data, as a Numpy array.
masks: int
the number of dropout masks to average over
Returns
-------
for each output, a tuple (y_pred, y_std) where y_pred is the predicted
value of the output, and each element of y_std estimates the standard
deviation of the corresponding element of y_pred
"""
dataset = NumpyDataset(X=X, y=None)
return self.predict_uncertainty(dataset, masks)
def predict(
self,
dataset: Dataset,
transformers: List[Transformer] = [],
output_types: Optional[List[str]] = None) -> OneOrMany[np.ndarray]:
"""
Uses self to make predictions on provided Dataset object.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
output_types: String or list of Strings
If specified, all outputs of this type will be retrieved
from the model. If output_types is specified, outputs must
be None.
Returns
-------
a NumPy array if the model produces a single output, or a list of arrays
if it produces multiple outputs
"""
generator = self.default_generator(
dataset, mode='predict', pad_batches=False)
return self.predict_on_generator(
generator, transformers=transformers, output_types=output_types)
def predict_embedding(self, dataset: Dataset) -> OneOrMany[np.ndarray]:
"""
Predicts embeddings created by underlying model if any exist.
An embedding must be specified to have `output_type` of
`'embedding'` in the model definition.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
Returns
-------
a NumPy array of the embeddings model produces, or a list
of arrays if it produces multiple embeddings
"""
generator = self.default_generator(
dataset, mode='predict', pad_batches=False)
return self._predict(generator, [], False, ['embedding'])
def predict_uncertainty(self, dataset: Dataset, masks: int = 50
) -> OneOrMany[Tuple[np.ndarray, np.ndarray]]:
"""
Predict the model's outputs, along with the uncertainty in each one.
The uncertainty is computed as described in https://arxiv.org/abs/1703.04977.
It involves repeating the prediction many times with different dropout masks.
The prediction is computed as the average over all the predictions. The
uncertainty includes both the variation among the predicted values (epistemic
uncertainty) and the model's own estimates for how well it fits the data
(aleatoric uncertainty). Not all models support uncertainty prediction.
Parameters
----------
dataset: dc.data.Dataset
Dataset to make prediction on
masks: int
the number of dropout masks to average over
Returns
-------
for each output, a tuple (y_pred, y_std) where y_pred is the predicted
value of the output, and each element of y_std estimates the standard
deviation of the corresponding element of y_pred
"""
sum_pred: List[np.ndarray] = []
sum_sq_pred: List[np.ndarray] = []
sum_var: List[np.ndarray] = []
for i in range(masks):
generator = self.default_generator(
dataset, mode='uncertainty', pad_batches=False)
results = self._predict(generator, [], True, None)
if len(sum_pred) == 0:
for p, v in results:
sum_pred.append(p)
sum_sq_pred.append(p * p)
sum_var.append(v)
else:
for j, (p, v) in enumerate(results):
sum_pred[j] += p
sum_sq_pred[j] += p * p
sum_var[j] += v
output = []
std = []
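# The total variance combines the spread of predictions across dropout masks
# (E[p^2] - E[p]^2, epistemic) with the mean of the predicted variances
# (aleatoric); the reported standard deviation is its square root.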
for i in range(len(sum_pred)):
p = sum_pred[i] / masks
output.append(p)
std.append(np.sqrt(sum_sq_pred[i] / masks - p * p + sum_var[i] / masks))
if len(output) == 1:
return (output[0], std[0])
else:
return list(zip(output, std))
def evaluate_generator(self,
generator: Iterable[Tuple[Any, Any, Any]],
metrics: List[Metric],
transformers: List[Transformer] = [],
per_task_metrics: bool = False):
"""Evaluate the performance of this model on the data produced by a generator.
Parameters
----------
generator: generator
this should generate batches, each represented as a tuple of the form
(inputs, labels, weights).
metric: list of deepchem.metrics.Metric
Evaluation metric
transformers: list of dc.trans.Transformers
Transformers that the input data has been transformed by. The output
is passed through these transformers to undo the transformations.
per_task_metrics: bool
If True, return per-task scores.
Returns
-------
dict
Maps tasks to scores under metric.
"""
evaluator = GeneratorEvaluator(self, generator, transformers)
return evaluator.compute_model_performance(metrics, per_task_metrics)
def compute_saliency(self, X: np.ndarray) -> OneOrMany[np.ndarray]:
"""Compute the saliency map for an input sample.
This computes the Jacobian matrix with the derivative of each output element
with respect to each input element. More precisely,
- If this model has a single output, it returns a matrix of shape
(output_shape, input_shape) with the derivatives.
- If this model has multiple outputs, it returns a list of matrices, one
for each output.
This method cannot be used on models that take multiple inputs.
Parameters
----------
X: ndarray
the input data for a single sample
Returns
-------
the Jacobian matrix, or a list of matrices
"""
input_shape = X.shape
X = np.reshape(X, [1] + list(X.shape))
self._ensure_built()
X_batch, _, _ = self._prepare_batch(([X], None, None))
# Compute the gradients.
X_tensor = X_batch[0]
X_tensor.requires_grad_(True)
outputs = self.model(X_tensor)
if isinstance(outputs, torch.Tensor):
outputs = [outputs]
final_result = []
for output in outputs:
output_shape = tuple(output.shape[1:])
output = output.reshape([-1])
result = []
grad_output = torch.zeros(output.shape[0], device=self.device)
for i in range(output.shape[0]):
grad_output.zero_()
grad_output[i] = 1
output.backward(grad_output, retain_graph=True)
result.append(X_tensor.grad.clone())
X_tensor.grad.zero_()
final_result.append(
torch.reshape(torch.stack(result),
output_shape + input_shape).cpu().numpy())
if len(final_result) == 1:
return final_result[0]
return final_result
def _prepare_batch(
self, batch: Tuple[Any, Any, Any]
) -> Tuple[List[torch.Tensor], List[torch.Tensor], List[torch.Tensor]]:
inputs, labels, weights = batch
inputs = [
x.astype(np.float32) if x.dtype == np.float64 else x for x in inputs
]
input_tensors = [torch.as_tensor(x, device=self.device) for x in inputs]
if labels is not None:
labels = [
x.astype(np.float32) if x.dtype == np.float64 else x for x in labels
]
label_tensors = [torch.as_tensor(x, device=self.device) for x in labels]
else:
label_tensors = []
if weights is not None:
weights = [
x.astype(np.float32) if x.dtype == np.float64 else x for x in weights
]
weight_tensors = [torch.as_tensor(x, device=self.device) for x in weights]
else:
weight_tensors = []
return (input_tensors, label_tensors, weight_tensors)
def default_generator(
self,
dataset: Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = True) -> Iterable[Tuple[List, List, List]]:
"""Create a generator that iterates batches for a dataset.
Subclasses may override this method to customize how model inputs are
generated from the data.
Parameters
----------
dataset: Dataset
the data to iterate
epochs: int
the number of times to iterate over the full dataset
mode: str
allowed values are 'fit' (called during training), 'predict' (called
during prediction), and 'uncertainty' (called during uncertainty
prediction)
deterministic: bool
whether to iterate over the dataset in order, or randomly shuffle the
data for each epoch
pad_batches: bool
whether to pad each batch up to this model's preferred batch size
Returns
-------
a generator that iterates batches, each represented as a tuple of lists:
([inputs], [outputs], [weights])
"""
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches):
yield ([X_b], [y_b], [w_b])
def save_checkpoint(self,
max_checkpoints_to_keep: int = 5,
model_dir: Optional[str] = None) -> None:
"""Save a checkpoint to disk.
Usually you do not need to call this method, since fit() saves checkpoints
automatically. If you have disabled automatic checkpointing during fitting,
this can be called to manually write checkpoints.
Parameters
----------
max_checkpoints_to_keep: int
the maximum number of checkpoints to keep. Older checkpoints are discarded.
model_dir: str, default None
Model directory to save checkpoint to. If None, revert to self.model_dir
"""
self._ensure_built()
if model_dir is None:
model_dir = self.model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# Save the checkpoint to a file.
data = {
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self._pytorch_optimizer.state_dict(),
'global_step': self._global_step
}
temp_file = os.path.join(model_dir, 'temp_checkpoint.pt')
torch.save(data, temp_file)
# Rename and delete older files.
paths = [
os.path.join(model_dir, 'checkpoint%d.pt' % (i + 1))
for i in range(max_checkpoints_to_keep)
]
if os.path.exists(paths[-1]):
os.remove(paths[-1])
for i in reversed(range(max_checkpoints_to_keep - 1)):
if os.path.exists(paths[i]):
os.rename(paths[i], paths[i + 1])
os.rename(temp_file, paths[0])
def get_checkpoints(self, model_dir: Optional[str] = None):
"""Get a list of all available checkpoint files.
Parameters
----------
model_dir: str, default None
Directory to get list of checkpoints from. Reverts to self.model_dir if None
"""
if model_dir is None:
model_dir = self.model_dir
files = sorted(os.listdir(model_dir))
files = [
f for f in files if f.startswith('checkpoint') and f.endswith('.pt')
]
return [os.path.join(model_dir, f) for f in files]
def restore(self,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None) -> None:
"""Reload the values of all variables from a checkpoint file.
Parameters
----------
checkpoint: str
the path to the checkpoint file to load. If this is None, the most recent
checkpoint will be chosen automatically. Call get_checkpoints() to get a
list of all available checkpoints.
model_dir: str, default None
Directory to restore checkpoint from. If None, use self.model_dir. If
checkpoint is not None, this is ignored.
"""
self._ensure_built()
if checkpoint is None:
checkpoints = sorted(self.get_checkpoints(model_dir))
if len(checkpoints) == 0:
raise ValueError('No checkpoint found')
checkpoint = checkpoints[0]
data = torch.load(checkpoint)
self.model.load_state_dict(data['model_state_dict'])
self._pytorch_optimizer.load_state_dict(data['optimizer_state_dict'])
self._global_step = data['global_step']
def get_global_step(self) -> int:
"""Get the number of steps of fitting that have been performed."""
return self._global_step
def _log_scalar_to_tensorboard(self, name: str, value: Any, step: int):
"""Log a scalar value to Tensorboard."""
self._summary_writer.add_scalar(name, value, step)
def _create_assignment_map(self,
source_model: "TorchModel",
include_top: bool = True,
**kwargs) -> Dict[Any, Any]:
"""
Creates a default assignment map between parameters of source and current model.
This is used only when a custom assignment map is missing. This assumes the
model is made of different layers followed by a dense layer for mapping to
output tasks. include_top is used to control whether or not the final dense
layer is used. The default assignment map is useful in cases where the type
of task is different (classification vs regression) and/or number of tasks.
Parameters
----------
source_model: dc.models.TorchModel
Source model to copy parameter values from.
include_top: bool, default True
if true, copies the last dense layer
"""
assignment_map: Dict[Any, Any] = {}
source_vars = list(source_model.model.parameters())
dest_vars = list(self.model.parameters())
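# Dropping the last two parameters removes the weight and bias of the final
# dense layer, which is what include_top=False is intended to skip.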
if not include_top:
source_vars = source_vars[:-2]
dest_vars = dest_vars[:-2]
for source_var, dest_var in zip(source_vars, dest_vars):
assignment_map[source_var] = dest_var
return assignment_map
def _create_value_map(self, source_model: "TorchModel",
**kwargs) -> Dict[Any, Any]:
"""
Creates a value map between parameters in the source model and their
current values. This is used only when a custom value map is missing, and
assumes the restore method has been called.
Parameters
----------
source_model: dc.models.TorchModel
Source model to create value map from
"""
value_map: Dict[Any, Any] = {}
source_vars = list(source_model.model.parameters())
for source_var in source_vars:
value_map[source_var] = source_var.detach().cpu().numpy()
return value_map
def load_from_pretrained(self,
source_model: "TorchModel",
assignment_map: Optional[Dict[Any, Any]] = None,
value_map: Optional[Dict[Any, Any]] = None,
checkpoint: Optional[str] = None,
model_dir: Optional[str] = None,
include_top: bool = True,
inputs: Optional[Sequence[Any]] = None,
**kwargs) -> None:
"""Copies parameter values from a pretrained model. `source_model` can either
be a pretrained model or a model with the same architecture. `value_map`
is a parameter-value dictionary. If no `value_map` is provided, the parameter
values are restored to the `source_model` from a checkpoint and a default
`value_map` is created. `assignment_map` is a dictionary mapping parameters
from the `source_model` to the current model. If no `assignment_map` is
provided, one is made from scratch and assumes the model is composed of
several different layers, with the final one being a dense layer. include_top
is used to control whether or not the final dense layer is used. The default
assignment map is useful in cases where the type of task is different
(classification vs regression) and/or number of tasks in the setting.
Parameters
----------
source_model: dc.TorchModel, required
source_model can either be the pretrained model or a dc.TorchModel with
the same architecture as the pretrained model. It is used to restore from
a checkpoint, if value_map is None and to create a default assignment map
if assignment_map is None
assignment_map: Dict, default None
Dictionary mapping the source_model parameters and current model parameters
value_map: Dict, default None
Dictionary containing source_model trainable parameters mapped to numpy
arrays. If value_map is None, the values are restored and a default
parameter map is created using the restored values
checkpoint: str, default None
the path to the checkpoint file to load. If this is None, the most recent
checkpoint will be chosen automatically. Call get_checkpoints() to get a
list of all available checkpoints
model_dir: str, default None
Restore model from custom model directory if needed
include_top: bool, default True
if True, copies the weights and bias associated with the final dense
layer. Used only when assignment map is None
inputs: List, input tensors for model
if not None, then the weights are built for both the source and self.
"""
if inputs is not None:
# Ensure weights for both models are built.
source_model.model(inputs)
self.model(inputs)
self._ensure_built()
if value_map is None:
logger.info(
"No value map provided. Creating default value map from restored model."
)
source_model.restore(model_dir=model_dir, checkpoint=checkpoint)
value_map = self._create_value_map(source_model=source_model)
if assignment_map is None:
logger.info("No assignment map provided. Creating custom assignment map.")
assignment_map = self._create_assignment_map(
source_model=source_model, include_top=include_top)
for source_var, dest_var in assignment_map.items():
assert source_var.shape == dest_var.shape
dest_var.data = torch.as_tensor(value_map[source_var], device=self.device)
class _StandardLoss(object):
"""The implements the loss function for models that use a dc.models.losses.Loss."""
def __init__(self, model: TorchModel, loss: Loss) -> None:
self.model = model
self.loss = loss # not used
self.criterion = loss._create_pytorch_loss()
def __call__(self, outputs: List, labels: List, weights: List) -> float:
if len(outputs) != 1 or len(labels) != 1 or len(weights) != 1:
raise ValueError(
"Loss functions expects exactly one each of outputs, labels, and weights"
)
losses = self.criterion(outputs[0], labels[0])
w = weights[0]
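# If the weights have fewer dimensions than the per-element losses, append
# singleton dimensions so they broadcast correctly in the multiplication below.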
if len(w.shape) < len(losses.shape):
if isinstance(w, torch.Tensor):
shape = tuple(w.shape)
else:
shape = w.shape
shape = tuple(-1 if x is None else x for x in shape)
w = w.reshape(shape + (1,) * (len(losses.shape) - len(w.shape)))
loss = losses * w
loss = loss.mean()
if self.model.regularization_loss is not None:
loss += self.model.regularization_loss()
return loss
|
mit
|
phillipjones2/taskbuster-boilerplate
|
functional_tests/test_all_users.py
|
2
|
2536
|
#-*- coding: utf-8 -*-
from selenium import webdriver
from django.core.urlresolvers import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.utils.translation import activate
from datetime import date
from django.utils import formats
class HomeNewVisitorTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
activate('en')
def tearDown(self):
self.browser.quit()
def get_full_url(self, namespace):
return self.live_server_url + reverse(namespace)
def test_home_title(self):
self.browser.get(self.get_full_url("home"))
self.assertIn('TaskBuster', self.browser.title)
def test_h1_css(self):
self.browser.get(self.get_full_url("home"))
h1 = self.browser.find_element_by_tag_name("h1")
self.assertEqual(h1.value_of_css_property("color"),"rgba(200, 50, 255, 1)")
def test_home_files(self):
self.browser.get(self.live_server_url + "/robots.txt")
self.assertNotIn("Page Not Found", self.browser.title)
self.browser.get(self.live_server_url + "/humans.txt")
self.assertNotIn("Page Not Found", self.browser.title)
def test_internationalization(self):
for lang, h1_text in [('en', 'Welcome to TaskBuster!'),
('ca', 'Benvingut a TaskBuster!'),
('es-mx','Bienvenido a TaskBuster!')]:
activate(lang)
self.browser.get(self.get_full_url("home"))
h1 = self.browser.find_element_by_tag_name("h1")
self.assertEqual(h1.text, h1_text)
def test_localization(self):
today = date.today()
for lang in ['en','ca']:
activate(lang)
self.browser.get(self.get_full_url("home"))
local_date = self.browser.find_element_by_id("local-date")
non_local_date = self.browser.find_element_by_id("non-local-date")
self.assertEqual(formats.date_format(today, use_l10n=True), local_date.text)
self.assertEqual(today.strftime('%Y-%m-%d'), non_local_date.text)
def test_time_zone(self):
self.browser.get(self.get_full_url("home"))
tz = self.browser.find_element_by_id("time-tz").text
utc = self.browser.find_element_by_id("time-utc").text
ny = self.browser.find_element_by_id("time-ny").text
self.assertNotEqual(tz,utc)
self.assertNotIn(ny, [tz,utc])
|
mit
|
keithroe/vtkoptix
|
ThirdParty/Twisted/twisted/conch/test/test_checkers.py
|
27
|
22085
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.checkers}.
"""
try:
import crypt
except ImportError:
cryptSkip = 'cannot run without crypt module'
else:
cryptSkip = None
import os, base64
from twisted.python import util
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword, IUsernamePassword, \
SSHPrivateKey, ISSHPrivateKey
from twisted.cred.error import UnhandledCredentials, UnauthorizedLogin
from twisted.python.fakepwd import UserDatabase, ShadowDatabase
from twisted.test.test_process import MockOS
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
dependencySkip = "can't run without Crypto and PyASN1"
else:
dependencySkip = None
from twisted.conch.ssh import keys
from twisted.conch import checkers
from twisted.conch.error import NotEnoughAuthentication, ValidPublicKey
from twisted.conch.test import keydata
if getattr(os, 'geteuid', None) is None:
euidSkip = "Cannot run without effective UIDs (questionable)"
else:
euidSkip = None
class HelperTests(TestCase):
"""
Tests for helper functions L{verifyCryptedPassword}, L{_pwdGetByName} and
L{_shadowGetByName}.
"""
skip = cryptSkip or dependencySkip
def setUp(self):
self.mockos = MockOS()
def test_verifyCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{True} if the plaintext password
passed to it matches the encrypted password passed to it.
"""
password = 'secret string'
salt = 'salty'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %r' % (
crypted, password))
def test_verifyCryptedPasswordMD5(self):
"""
L{verifyCryptedPassword} returns True if the provided cleartext password
matches the provided MD5 password hash.
"""
password = 'password'
salt = '$1$salt'
crypted = crypt.crypt(password, salt)
self.assertTrue(
checkers.verifyCryptedPassword(crypted, password),
'%r supposed to be valid encrypted password for %s' % (
crypted, password))
def test_refuteCryptedPassword(self):
"""
L{verifyCryptedPassword} returns C{False} if the plaintext password
passed to it does not match the encrypted password passed to it.
"""
password = 'string secret'
wrong = 'secret string'
crypted = crypt.crypt(password, password)
self.assertFalse(
checkers.verifyCryptedPassword(crypted, wrong),
'%r not supposed to be valid encrypted password for %s' % (
crypted, wrong))
def test_pwdGetByName(self):
"""
L{_pwdGetByName} returns a tuple of items from the UNIX /etc/passwd
database if the L{pwd} module is present.
"""
userdb = UserDatabase()
userdb.addUser(
'alice', 'secrit', 1, 2, 'first last', '/foo', '/bin/sh')
self.patch(checkers, 'pwd', userdb)
self.assertEqual(
checkers._pwdGetByName('alice'), userdb.getpwnam('alice'))
def test_pwdGetByNameWithoutPwd(self):
"""
If the C{pwd} module isn't present, L{_pwdGetByName} returns C{None}.
"""
self.patch(checkers, 'pwd', None)
self.assertIs(checkers._pwdGetByName('alice'), None)
def test_shadowGetByName(self):
"""
L{_shadowGetByName} returns a tuple of items from the UNIX /etc/shadow
database if the L{spwd} is present.
"""
userdb = ShadowDatabase()
userdb.addUser('bob', 'passphrase', 1, 2, 3, 4, 5, 6, 7)
self.patch(checkers, 'spwd', userdb)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(util, 'os', self.mockos)
self.assertEqual(
checkers._shadowGetByName('bob'), userdb.getspnam('bob'))
self.assertEqual(self.mockos.seteuidCalls, [0, 2345])
self.assertEqual(self.mockos.setegidCalls, [0, 1234])
def test_shadowGetByNameWithoutSpwd(self):
"""
L{_shadowGetByName} uses the C{shadow} module to return a tuple of items
from the UNIX /etc/shadow database if the C{spwd} module is not present
and the C{shadow} module is.
"""
userdb = ShadowDatabase()
userdb.addUser('bob', 'passphrase', 1, 2, 3, 4, 5, 6, 7)
self.patch(checkers, 'spwd', None)
self.patch(checkers, 'shadow', userdb)
self.patch(util, 'os', self.mockos)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.assertEqual(
checkers._shadowGetByName('bob'), userdb.getspnam('bob'))
self.assertEqual(self.mockos.seteuidCalls, [0, 2345])
self.assertEqual(self.mockos.setegidCalls, [0, 1234])
def test_shadowGetByNameWithoutEither(self):
"""
L{_shadowGetByName} returns C{None} if neither C{spwd} nor C{shadow} is
present.
"""
self.patch(checkers, 'spwd', None)
self.patch(checkers, 'shadow', None)
self.assertIs(checkers._shadowGetByName('bob'), None)
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
class SSHPublicKeyDatabaseTestCase(TestCase):
"""
Tests for L{SSHPublicKeyDatabase}.
"""
skip = euidSkip or dependencySkip
def setUp(self):
self.checker = checkers.SSHPublicKeyDatabase()
self.key1 = base64.encodestring("foobar")
self.key2 = base64.encodestring("eggspam")
self.content = "t1 %s foo\nt2 %s egg\n" % (self.key1, self.key2)
self.mockos = MockOS()
self.mockos.path = FilePath(self.mktemp())
self.mockos.path.makedirs()
self.patch(util, 'os', self.mockos)
self.sshDir = self.mockos.path.child('.ssh')
self.sshDir.makedirs()
userdb = UserDatabase()
userdb.addUser(
'user', 'password', 1, 2, 'first last',
self.mockos.path.path, '/bin/shell')
self.checker._userdb = userdb
def _testCheckKey(self, filename):
self.sshDir.child(filename).setContent(self.content)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
user.blob = "eggspam"
self.assertTrue(self.checker.checkKey(user))
user.blob = "notallowed"
self.assertFalse(self.checker.checkKey(user))
def test_checkKey(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys file and check the keys against that file.
"""
self._testCheckKey("authorized_keys")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKey2(self):
"""
L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
authorized_keys2 file and check the keys against that file.
"""
self._testCheckKey("authorized_keys2")
self.assertEqual(self.mockos.seteuidCalls, [])
self.assertEqual(self.mockos.setegidCalls, [])
def test_checkKeyAsRoot(self):
"""
If the key file is readable, L{SSHPublicKeyDatabase.checkKey} should
switch its uid/gid to the ones of the authenticated user.
"""
keyFile = self.sshDir.child("authorized_keys")
keyFile.setContent(self.content)
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = self.mockos.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.mockos.euid = 2345
self.mockos.egid = 1234
self.patch(self.mockos, "seteuid", seteuid)
self.patch(util, 'os', self.mockos)
user = UsernamePassword("user", "password")
user.blob = "foobar"
self.assertTrue(self.checker.checkKey(user))
self.assertEqual(self.mockos.seteuidCalls, [0, 1, 0, 2345])
self.assertEqual(self.mockos.setegidCalls, [2, 1234])
def test_requestAvatarId(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should return the avatar id
passed in if its C{_checkKey} method returns True.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
'test', 'ssh-rsa', keydata.publicRSA_openssh, 'foo',
keys.Key.fromString(keydata.privateRSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
def _verify(avatarId):
self.assertEqual(avatarId, 'test')
return d.addCallback(_verify)
def test_requestAvatarIdWithoutSignature(self):
"""
L{SSHPublicKeyDatabase.requestAvatarId} should raise L{ValidPublicKey}
if the credentials represent a valid key without a signature. This
tells the user that the key is valid for login, but does not actually
allow that user to do so without a signature.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
'test', 'ssh-rsa', keydata.publicRSA_openssh, None, None)
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, ValidPublicKey)
def test_requestAvatarIdInvalidKey(self):
"""
If L{SSHPublicKeyDatabase.checkKey} returns False,
C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
"""
def _checkKey(ignored):
return False
self.patch(self.checker, 'checkKey', _checkKey)
d = self.checker.requestAvatarId(None)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdInvalidSignature(self):
"""
Valid keys with invalid signatures should cause
L{SSHPublicKeyDatabase.requestAvatarId} to return an L{UnauthorizedLogin}
failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey(
'test', 'ssh-rsa', keydata.publicRSA_openssh, 'foo',
keys.Key.fromString(keydata.privateDSA_openssh).sign('foo'))
d = self.checker.requestAvatarId(credentials)
return self.assertFailure(d, UnauthorizedLogin)
def test_requestAvatarIdNormalizeException(self):
"""
Exceptions raised while verifying the key should be normalized into an
C{UnauthorizedLogin} failure.
"""
def _checkKey(ignored):
return True
self.patch(self.checker, 'checkKey', _checkKey)
credentials = SSHPrivateKey('test', None, 'blob', 'sigData', 'sig')
d = self.checker.requestAvatarId(credentials)
def _verifyLoggedException(failure):
errors = self.flushLoggedErrors(keys.BadKeyError)
self.assertEqual(len(errors), 1)
return failure
d.addErrback(_verifyLoggedException)
return self.assertFailure(d, UnauthorizedLogin)
class SSHProtocolCheckerTestCase(TestCase):
"""
Tests for L{SSHProtocolChecker}.
"""
skip = dependencySkip
def test_registerChecker(self):
"""
L{SSHProtocolChecker.registerChecker} should add the given checker to
the list of registered checkers.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase())
self.assertEqual(checker.credentialInterfaces, [ISSHPrivateKey])
self.assertIsInstance(checker.checkers[ISSHPrivateKey],
checkers.SSHPublicKeyDatabase)
def test_registerCheckerWithInterface(self):
"""
If a specific interface is passed into
L{SSHProtocolChecker.registerChecker}, that interface should be
registered instead of what the checker specifies in
credentialInterfaces.
"""
checker = checkers.SSHProtocolChecker()
self.assertEqual(checker.credentialInterfaces, [])
checker.registerChecker(checkers.SSHPublicKeyDatabase(),
IUsernamePassword)
self.assertEqual(checker.credentialInterfaces, [IUsernamePassword])
self.assertIsInstance(checker.checkers[IUsernamePassword],
checkers.SSHPublicKeyDatabase)
def test_requestAvatarId(self):
"""
L{SSHProtocolChecker.requestAvatarId} should defer to one of its
registered checkers to authenticate a user.
"""
checker = checkers.SSHProtocolChecker()
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
def _callback(avatarId):
self.assertEqual(avatarId, 'test')
return d.addCallback(_callback)
def test_requestAvatarIdWithNotEnoughAuthentication(self):
"""
If the client indicates that it is never satisfied, by always returning
False from areDone, then L{SSHProtocolChecker} should raise
L{NotEnoughAuthentication}.
"""
checker = checkers.SSHProtocolChecker()
def _areDone(avatarId):
return False
self.patch(checker, 'areDone', _areDone)
passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
passwordDatabase.addUser('test', 'test')
checker.registerChecker(passwordDatabase)
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, NotEnoughAuthentication)
def test_requestAvatarIdInvalidCredential(self):
"""
If the passed credentials aren't handled by any registered checker,
L{SSHProtocolChecker} should raise L{UnhandledCredentials}.
"""
checker = checkers.SSHProtocolChecker()
d = checker.requestAvatarId(UsernamePassword('test', 'test'))
return self.assertFailure(d, UnhandledCredentials)
def test_areDone(self):
"""
The default L{SSHProtocolChecker.areDone} should simply return True.
"""
self.assertEqual(checkers.SSHProtocolChecker().areDone(None), True)
class UNIXPasswordDatabaseTests(TestCase):
"""
Tests for L{UNIXPasswordDatabase}.
"""
skip = cryptSkip or dependencySkip
def assertLoggedIn(self, d, username):
"""
Assert that the L{Deferred} passed in is called back with the value
'username'. This represents a valid login for this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{Deferred}
"""
result = []
d.addBoth(result.append)
self.assertEqual(len(result), 1, "login incomplete")
if isinstance(result[0], Failure):
result[0].raiseException()
self.assertEqual(result[0], username)
def test_defaultCheckers(self):
"""
L{UNIXPasswordDatabase} with no arguments checks the C{pwd} database
and then the C{spwd} database.
"""
checker = checkers.UNIXPasswordDatabase()
def crypted(username, password):
salt = crypt.crypt(password, username)
crypted = crypt.crypt(password, '$1$' + salt)
return crypted
pwd = UserDatabase()
pwd.addUser('alice', crypted('alice', 'password'),
1, 2, 'foo', '/foo', '/bin/sh')
# x and * are convention for "look elsewhere for the password"
pwd.addUser('bob', 'x', 1, 2, 'bar', '/bar', '/bin/sh')
spwd = ShadowDatabase()
spwd.addUser('alice', 'wrong', 1, 2, 3, 4, 5, 6, 7)
spwd.addUser('bob', crypted('bob', 'password'),
8, 9, 10, 11, 12, 13, 14)
self.patch(checkers, 'pwd', pwd)
self.patch(checkers, 'spwd', spwd)
mockos = MockOS()
self.patch(util, 'os', mockos)
mockos.euid = 2345
mockos.egid = 1234
cred = UsernamePassword("alice", "password")
self.assertLoggedIn(checker.requestAvatarId(cred), 'alice')
self.assertEqual(mockos.seteuidCalls, [])
self.assertEqual(mockos.setegidCalls, [])
cred.username = "bob"
self.assertLoggedIn(checker.requestAvatarId(cred), 'bob')
self.assertEqual(mockos.seteuidCalls, [0, 2345])
self.assertEqual(mockos.setegidCalls, [0, 1234])
def assertUnauthorizedLogin(self, d):
"""
Asserts that the L{Deferred} passed in is erred back with an
L{UnauthorizedLogin} L{Failure}. This represents an invalid login for
this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{None}
"""
self.assertRaises(
checkers.UnauthorizedLogin, self.assertLoggedIn, d, 'bogus value')
def test_passInCheckers(self):
"""
L{UNIXPasswordDatabase} takes a list of functions to check for UNIX
user information.
"""
password = crypt.crypt('secret', 'secret')
userdb = UserDatabase()
userdb.addUser('anybody', password, 1, 2, 'foo', '/bar', '/bin/sh')
checker = checkers.UNIXPasswordDatabase([userdb.getpwnam])
self.assertLoggedIn(
checker.requestAvatarId(UsernamePassword('anybody', 'secret')),
'anybody')
def test_verifyPassword(self):
"""
If the encrypted password provided by the getpwnam function is valid
(verified by the L{verifyCryptedPassword} function), we callback the
C{requestAvatarId} L{Deferred} with the username.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword('username', 'username')
self.assertLoggedIn(checker.requestAvatarId(credential), 'username')
def test_failOnKeyError(self):
"""
If the getpwnam function raises a KeyError, the login fails with an
L{UnauthorizedLogin} exception.
"""
def getpwnam(username):
raise KeyError(username)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword('username', 'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_failOnBadPassword(self):
"""
If the verifyCryptedPassword function doesn't verify the password, the
login fails with an L{UnauthorizedLogin} exception.
"""
def verifyCryptedPassword(crypted, pw):
return False
def getpwnam(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam])
credential = UsernamePassword('username', 'username')
self.assertUnauthorizedLogin(checker.requestAvatarId(credential))
def test_loopThroughFunctions(self):
"""
UNIXPasswordDatabase.requestAvatarId loops through each getpwnam
function associated with it and returns a L{Deferred} which fires with
the avatar id as soon as one of them yields an entry whose password
verifies, even if earlier ones do not verify the password.
"""
def verifyCryptedPassword(crypted, pw):
return crypted == pw
def getpwnam1(username):
return [username, 'not the password']
def getpwnam2(username):
return [username, username]
self.patch(checkers, 'verifyCryptedPassword', verifyCryptedPassword)
checker = checkers.UNIXPasswordDatabase([getpwnam1, getpwnam2])
credential = UsernamePassword('username', 'username')
self.assertLoggedIn(checker.requestAvatarId(credential), 'username')
def test_failOnSpecial(self):
"""
If the password returned by any function is C{""}, C{"x"}, or C{"*"} it
is not compared against the supplied password. Instead it is skipped.
"""
pwd = UserDatabase()
pwd.addUser('alice', '', 1, 2, '', 'foo', 'bar')
pwd.addUser('bob', 'x', 1, 2, '', 'foo', 'bar')
pwd.addUser('carol', '*', 1, 2, '', 'foo', 'bar')
self.patch(checkers, 'pwd', pwd)
checker = checkers.UNIXPasswordDatabase([checkers._pwdGetByName])
cred = UsernamePassword('alice', '')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword('bob', 'x')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
cred = UsernamePassword('carol', '*')
self.assertUnauthorizedLogin(checker.requestAvatarId(cred))
|
bsd-3-clause
|
gezb/osmc
|
package/mediacenter-addon-osmc/src/script.module.osmcsetting.apfstore/resources/lib/CompLogger.py
|
93
|
2077
|
import time
from functools import wraps
TEST_LOG_BOOL = True
def test_logger(msg):
print 'test-' + msg
def comprehensive_logger(logger=None, logging=True, maxlength=250, nowait=False):
'''
Decorator to log the inputs and outputs of functions, as well as the time taken
to run the function.
Requires: time, functools
logger: [opt] logging function, if not provided print is used
logging: [opt] boolean, turn logging on and off, default is True
maxlength: [opt] integer, sets the maximum logged length of an argument or returned value, default 250
nowait: [opt] boolean, instructs the logger not to wait for the function to finish, default is False
'''
def default_logger(msg):
print msg
if logger == None:
logger = default_logger
def get_args(*args, **kwargs):
all_args = []
for i, arg in enumerate(args):
itm = 'pos' + str(i) + ": " + str(arg)[:maxlength]
all_args.append(itm)
for k, v in kwargs.iteritems():
itm = str(k) + ": " + str(v)[:maxlength]
all_args.append(itm)
return all_args
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if logging and logger != None:
logger(func.__module__ + '.' + func.__name__ + " received: " + ", ".join(get_args(*args, **kwargs)))
if nowait:
func(*args, **kwargs)
logger(func.__module__ + '.' + func.__name__ + " -nowait")
return
else:
start = time.time()
result = func(*args, **kwargs)
end = time.time()
if logging and logger != None:
logger(func.__module__ + '.' + func.__name__ + " [" + str(end-start) + "] " + ' returns: ' + str(result)[:maxlength])
return result
return wrapper
return decorator
clog = comprehensive_logger
@clog(logging=TEST_LOG_BOOL)
def arg_tester(a, b, cdef):
print 'a: ' + str(a)
print 'b: ' + str(b)
print 'cdef: ' + str(cdef)
if __name__ == "__main__":
arg_tester('han', ['chewie', 'luke'], cdef='123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890')
|
gpl-2.0
|
trondeau/gnuradio-old
|
grc/grc_gnuradio/blks2/selector.py
|
7
|
5833
|
#
# Copyright 2008,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
class selector(gr.hier_block2):
"""A hier2 block with N inputs and M outputs, where data is only forwarded through input n to output m."""
def __init__(self, item_size, num_inputs, num_outputs, input_index, output_index):
"""
Selector constructor.
Args:
item_size: the size of the gr data stream in bytes
num_inputs: the number of inputs (integer)
num_outputs: the number of outputs (integer)
input_index: the index for the source data
output_index: the index for the destination data
"""
gr.hier_block2.__init__(
self, 'selector',
gr.io_signature(num_inputs, num_inputs, item_size),
gr.io_signature(num_outputs, num_outputs, item_size),
)
#terminator blocks for unused inputs and outputs
self.input_terminators = [blocks.null_sink(item_size) for i in range(num_inputs)]
self.output_terminators = [blocks.head(item_size, 0) for i in range(num_outputs)]
self.copy = blocks.copy(item_size)
#connections
for i in range(num_inputs): self.connect((self, i), self.input_terminators[i])
for i in range(num_outputs): self.connect(blocks.null_source(item_size),
self.output_terminators[i], (self, i))
self.item_size = item_size
self.input_index = input_index
self.output_index = output_index
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self._connect_current()
def _indexes_valid(self):
"""
Are the input and output indexes within range of the number of inputs and outputs?
Returns:
true if input index and output index are in range
"""
return self.input_index in range(self.num_inputs) and self.output_index in range(self.num_outputs)
def _connect_current(self):
"""If the input and output indexes are valid:
disconnect the blocks at the input and output index from their terminators,
and connect them to one another. Then connect the terminators to one another."""
if self._indexes_valid():
self.disconnect((self, self.input_index), self.input_terminators[self.input_index])
self.disconnect(self.output_terminators[self.output_index], (self, self.output_index))
self.connect((self, self.input_index), self.copy)
self.connect(self.copy, (self, self.output_index))
self.connect(self.output_terminators[self.output_index], self.input_terminators[self.input_index])
def _disconnect_current(self):
"""If the input and output indexes are valid:
disconnect the blocks at the input and output index from one another,
and the terminators at the input and output index from one another.
Reconnect the blocks to the terminators."""
if self._indexes_valid():
self.disconnect((self, self.input_index), self.copy)
self.disconnect(self.copy, (self, self.output_index))
self.disconnect(self.output_terminators[self.output_index], self.input_terminators[self.input_index])
self.connect((self, self.input_index), self.input_terminators[self.input_index])
self.connect(self.output_terminators[self.output_index], (self, self.output_index))
def set_input_index(self, input_index):
"""
Change the block to the new input index if the index changed.
Args:
input_index: the new input index
"""
if self.input_index != input_index:
self.lock()
self._disconnect_current()
self.input_index = input_index
self._connect_current()
self.unlock()
def set_output_index(self, output_index):
"""
Change the block to the new output index if the index changed.
Args:
output_index: the new output index
"""
if self.output_index != output_index:
self.lock()
self._disconnect_current()
self.output_index = output_index
self._connect_current()
self.unlock()
class valve(selector):
"""Wrapper for selector with 1 input and 1 output."""
def __init__(self, item_size, open):
"""
Constructor for valve.
Args:
item_size: the size of the gr data stream in bytes
open: true if initial valve state is open
"""
if open: output_index = -1
else: output_index = 0
selector.__init__(self, item_size, 1, 1, 0, output_index)
def set_open(self, open):
"""
Callback to set open state.
Args:
open: true to set valve state to open
"""
if open: output_index = -1
else: output_index = 0
self.set_output_index(output_index)
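# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): a minimal, hedged
# example of wiring the selector into a flowgraph, assuming a GNU Radio
# 3.7-style environment and using only blocks already imported above.
if __name__ == '__main__':
    tb = gr.top_block()
    sel = selector(gr.sizeof_float, num_inputs=2, num_outputs=1,
                   input_index=0, output_index=0)
    tb.connect(blocks.null_source(gr.sizeof_float), (sel, 0))
    tb.connect(blocks.null_source(gr.sizeof_float), (sel, 1))
    tb.connect((sel, 0), blocks.null_sink(gr.sizeof_float))
    # Forward input 1 instead of input 0; set_input_index() performs the
    # lock()/disconnect/reconnect/unlock() sequence internally.
    sel.set_input_index(1)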
|
gpl-3.0
|
qifeigit/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
207
|
27395
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
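# Editorial note (not part of the original source): a tiny worked example of the
# contingency matrix above, kept as a comment so module behaviour is unchanged.
# With labels_true = [0, 0, 1, 1] and labels_pred = [0, 1, 1, 1], rows index the
# true classes and columns the predicted clusters:
#
#     >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#     array([[1, 1],
#            [0, 2]])
#
# i.e. one sample of true class 0 fell in cluster 0, one in cluster 1, and both
# samples of true class 1 fell in cluster 1.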
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even with permuted labels::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
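# Editorial note (not part of the original source): unlike the neighbouring
# scores, mutual_info_score has no Examples section, so a small hedged
# illustration is added here as a comment. Identical balanced labelings give an
# MI equal to the entropy of the labeling, i.e. log(2) in natural-log units:
#
#     >>> mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])   # doctest: +ELLIPSIS
#     0.69...
#
# Independent labelings give an MI of (approximately) zero.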
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
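# Editorial note (not part of the original source): entropy() uses natural
# logarithms, so a balanced binary labeling has entropy log(2), for example:
#
#     >>> entropy([0, 0, 1, 1])   # doctest: +ELLIPSIS
#     0.69...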
|
bsd-3-clause
|
NiceCircuits/solderingPen
|
pc/bootloader/niceSolderingPenBootloader.py
|
1
|
4651
|
# -*- coding: utf-8 -*-
"""Nice Soldering Pen bootloader
Usage: niceSolderingPenBootloader.exe com_name [file_name]
com_name: name of serial port: "COM1" or "/dev/ttyS1"
file_name: name of file (*.bin) to be uploaded.
If not present, "firmware.bin" is used"""
import serial, sys, crcmod, time
def error(text):
print(__doc__)
print("Error: " + text)
quit()
class bootloader:
serial = None
file = None
crc_fun=None
COMMAND_ERASE = b'\x81'
COMMAND_WRITE = b'\x82'
COMMAND_INFO = b'\x84'
COMMAND_RUN_APP = b'\x88'
RESPONSE_OK = b'o'
RESPONSE_ERROR = b'e'
WRITE_BLOCK_SIZE = 128
WRITE_BLOCK_COUNT = 128
APP_INFO_SIZE=1024
CRC_SIZE = 4
RETRY_NUMBER = 4
# timeout big enough for erase cycle
TIMEOUT_S = 2
def read_info(self):
length = self.APP_INFO_SIZE+self.CRC_SIZE
time.sleep(0.15)
for i in range(self.RETRY_NUMBER):
print("Reading application info...")
self.serial.flush()
self.serial.write(self.COMMAND_INFO)
data=self.serial.read(length)
if len(data)== length:
crc_data=int.from_bytes(data[self.APP_INFO_SIZE:(length)],\
'little')
data=data[0:self.APP_INFO_SIZE]
crc=self.crc_fun(data)
if crc_data==crc:
print(data)
break
def write_app(self):
if self.file:
print("Writing flash...")
t_start = time.time()
for cnt in range(self.WRITE_BLOCK_COUNT):
data=self.file.read(self.WRITE_BLOCK_SIZE)
if len(data)==0:
break
elif len(data)<self.WRITE_BLOCK_SIZE:
# Fill remaining block area
data=data + b'\xFF'*(self.WRITE_BLOCK_SIZE-len(data))
err=self.RESPONSE_OK
for i in range(self.RETRY_NUMBER):
time.sleep(0.15)
self.serial.write(self.COMMAND_WRITE)
response=self.serial.read(1)
if response==self.RESPONSE_OK:
self.serial.write(data+self.crc_fun(data).to_bytes(4,'little'))
response=self.serial.read(1)
if response==self.RESPONSE_OK:
print("%1.1f%%" % ((cnt+1)/self.WRITE_BLOCK_COUNT*100),end="\r")
err=self.RESPONSE_OK
break
else:
err=response[0]
if err!=self.RESPONSE_OK:
print("Write error 0x%X" % response[0])
break
print("Elapsed %1.1fs" % (time.time()-t_start))
else:
print("No file to write")
def erase_app(self):
for cnt in range(self.WRITE_BLOCK_COUNT):
time.sleep(0.15)
self.serial.write(self.COMMAND_ERASE)
response=self.serial.read(1)
if response==self.RESPONSE_OK:
print("Chip erased")
return
print("Erase error 0x%X" % response[0])
def run_app(self):
self.serial.write(self.COMMAND_RUN_APP)
def __init__(self, serial_name, file_name):
self.crc_fun=crcmod.mkCrcFun(0x104C11DB7, 0xFFFFFFFF, rev=False)
try:
self.serial = serial.Serial(serial_name, 1200, inter_byte_timeout=self.TIMEOUT_S)
except serial.SerialException:
error("Cannot open COM port " + serial_name)
print("Open serial port " + self.serial.name)
if file_name:
try:
self.file = open(file_name,"rb")
except IOError:
error("Cannot open file " + file_name)
print("Open file " + file_name)
def __del__(self):
if(self.serial):
print("Close serial port " + self.serial.name)
self.serial.close()
if __name__ == "__main__":
if 1:
if len(sys.argv) < 2:
error("The folowing arguments are required: com_name")
else:
serial_name = sys.argv[1]
if len(sys.argv) < 3:
file_name = "firmware.bin"
else:
file_name = sys.argv[2]
bld = bootloader(serial_name, file_name)
bld.read_info()
bld.erase_app()
bld.write_app()
bld.run_app()
else:
serial_name="COM45"
bld = bootloader(serial_name, "")
bld.read_info()
|
cc0-1.0
|
2013Commons/HUE-SHARK
|
build/env/lib/python2.7/site-packages/Django-1.2.3-py2.7.egg/django/db/backends/util.py
|
19
|
4056
|
import datetime
import decimal
from time import time
from django.utils.hashcompat import md5_constructor
class CursorDebugWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db # Instance of a BaseDatabaseWrapper subclass
def execute(self, sql, params=()):
start = time()
try:
return self.cursor.execute(sql, params)
finally:
stop = time()
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries.append({
'sql': sql,
'time': "%.3f" % (stop - start),
})
def executemany(self, sql, param_list):
start = time()
try:
return self.cursor.executemany(sql, param_list)
finally:
stop = time()
self.db.queries.append({
'sql': '%s times: %s' % (len(param_list), sql),
'time': "%.3f" % (stop - start),
})
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return s and datetime.date(*map(int, s.split('-'))) or None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s: return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.'+microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s: return None
if not ' ' in s: return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds), int(float('.'+microseconds) * 1000000))
def typecast_boolean(s):
if s is None: return None
if not s: return False
return str(s)[0].lower() == 't'
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_boolean(obj, d):
return obj and '1' or '0'
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hash = md5_constructor(name).hexdigest()[:4]
return '%s%s' % (name[:length-4], hash)
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
return u'%s' % str(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return u"%.*f" % (decimal_places, value)
|
apache-2.0
|
NeuralEnsemble/neuroConstruct
|
lib/jython/Lib/xml/sax/__init__.py
|
117
|
3413
|
"""Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
drivers2 -- Contains the driver that wraps a Java SAX implementation in Python
objects.
"""
from xmlreader import InputSource
from handler import ContentHandler, ErrorHandler
from _exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(string))
parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.drivers2.drv_javasax"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.drivers2.drv_javasax
import os, sys
if os.environ.has_key("PY_SAX_PARSER"):
default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError,e:
import sys
if sys.modules.has_key(parser_name):
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
def _create_parser(parser_name):
drv_module = __import__(parser_name,{},{},['create_parser'])
return drv_module.create_parser()
del sys
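# Editorial usage sketch (not part of the original file): a minimal, hedged
# example of the convenience API above; MyHandler is a hypothetical handler
# class defined by the caller. Kept as a comment so nothing runs at import time.
#
#     from xml.sax import parseString
#     from xml.sax.handler import ContentHandler
#
#     class MyHandler(ContentHandler):
#         def startElement(self, name, attrs):
#             print "start element:", name
#
#     parseString("<root><child/></root>", MyHandler())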
|
gpl-2.0
|
nickto/telegram-bot-aurora
|
tests/test_forecast.py
|
1
|
5195
|
import datetime
import os
import sys
import unittest
sys.path.append('../')
import forecast as fc
class TestCoodinateConversion(unittest.TestCase):
def setUp(self):
pass
def test_lon_from_360_to_180(self):
self.assertEqual(fc.lon_from_360_to_180(0), 0)
self.assertEqual(fc.lon_from_360_to_180(45), 45)
self.assertIn(fc.lon_from_360_to_180(180), [180, -180]) # both are correct
self.assertEqual(fc.lon_from_360_to_180(190), -170)
self.assertEqual(fc.lon_from_360_to_180(315), -45)
self.assertEqual(fc.lon_from_360_to_180(370), 10)
self.assertEqual(fc.lon_from_360_to_180(-10), -10)
def test_lon_from_180_to_360(self):
self.assertEqual(fc.lon_from_180_to_360(0), 0)
self.assertEqual(fc.lon_from_180_to_360(45), 45)
self.assertEqual(fc.lon_from_180_to_360(180), 180)
self.assertEqual(fc.lon_from_180_to_360(-180), 180)
self.assertEqual(fc.lon_from_180_to_360(-170), 190)
self.assertEqual(fc.lon_from_180_to_360(-45), 315)
def test_lon_from_180_to_1024(self):
self.assertAlmostEqual(fc.lon_from_180_to_1024(0), 0, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(45), 1024 / 8, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(90), 1024 / 4, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(45 + 90), 3 * 1024 / 8, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(180), 1024 / 2, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(190), 540, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(-45), 1024 - 1024 / 8, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(-90), 1024 - 1024 / 4, delta=1)
self.assertAlmostEqual(fc.lon_from_180_to_1024(-180), 1024 / 2, delta=1)
self.assertEqual(fc.lon_from_180_to_1024(-180), fc.lon_from_180_to_1024(180))
self.assertEqual(fc.lon_from_180_to_1024(-190), fc.lon_from_180_to_1024(170))
self.assertEqual(fc.lon_from_180_to_1024(190), fc.lon_from_180_to_1024(-170))
self.assertLessEqual(fc.lon_from_180_to_1024(-0.0001), 1023)
def test_lat_from_90_to_512(self):
self.assertAlmostEqual(fc.lat_from_90_to_512(-90), 0, delta=1)
self.assertAlmostEqual(fc.lat_from_90_to_512(0), 256, delta=1)
self.assertAlmostEqual(fc.lat_from_90_to_512(90), 511, delta=1)
self.assertRaises(Exception, fc.lat_from_90_to_512, -100)
self.assertRaises(Exception, fc.lat_from_90_to_512, 100)
class TestStringsConversion(unittest.TestCase):
def setUp(self):
pass
def test_str_to_dattime(self):
self.assertIsInstance(fc.str_dmy_to_datetime_utc("09 Sep 2017"), datetime.datetime)
self.assertIsInstance(fc.str_ymdhm_to_datetime_utc("2017-09-09 21:45"), datetime.datetime)
class TestMagneticCoordinates(unittest.TestCase):
# True values are manually taken from here: http://wdc.kugi.kyoto-u.ac.jp/igrf/gggm/index.html
def setUp(self):
pass
# Equator
def test_0_0(self):
place = fc.Place(0, 0)
self.assertAlmostEqual(place.location["mag"]["lat"], 2.88, delta=1)
self.assertAlmostEqual(place.location["mag"]["lon"], 72.86, delta=1)
def test_10_10(self):
place = fc.Place(10, 10)
self.assertAlmostEqual(place.location["mag"]["lat"], 11.03, delta=1)
self.assertAlmostEqual(place.location["mag"]["lon"], 84.41, delta=1)
# Middle latitudes
def test_56_15(self):
place = fc.Place(56, 15)
self.assertAlmostEqual(place.location["mag"]["lat"], 55.03, delta=1)
self.assertAlmostEqual(place.location["mag"]["lon"], 101.70, delta=1)
# High latitudes
def test_80_5(self):
place = fc.Place(80, 5)
self.assertAlmostEqual(place.location["mag"]["lat"], 77.65, delta=1)
self.assertAlmostEqual(place.location["mag"]["lon"], 127.03, delta=1)
# Negative magnetic longitudes
def test_70_m80(self):
place = fc.Place(70, -80)
self.assertAlmostEqual(place.location["mag"]["lat"], 79.41, delta=1)
self.assertAlmostEqual(place.location["mag"]["lon"], -13.91, delta=1)
# Negative magnetic longitude and latitude
def test_m50_m100(self):
place = fc.Place(-50, -100)
self.assertAlmostEqual(place.location["mag"]["lat"], -41.04, delta=1)
self.assertAlmostEqual(place.location["mag"]["lon"], -23.17, delta=1)
class TestForecast(unittest.TestCase):
FORECASTS_DIR = "./data/forecasts"
NOWCASTS_DIR = "./data/nowcasts"
def setUp(self):
os.chdir("../")
#os.system("./scraper.py forecast")
#os.system("./scraper.py nowcast")
def tearDown(self):
os.chdir("./tests")
def test(self):
place = fc.Place(-62, -13)
forecast = fc.Forecast(place)
forecast.update_to_latest(self.FORECASTS_DIR)
print(forecast.place)
print(forecast.kp)
path = forecast.plot_forecast()
print(path)
kp_above_required = forecast.get_above_required_kp(0)
print(kp_above_required)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
mit
|
Unow/edx-platform
|
lms/djangoapps/courseware/tests/test_views.py
|
10
|
22197
|
# coding=UTF-8
"""
Tests courseware views.py
"""
import unittest
from datetime import datetime
from mock import MagicMock, patch
from pytz import UTC
from django.test import TestCase
from django.http import Http404
from django.test.utils import override_settings
from django.contrib.auth.models import User, AnonymousUser
from django.test.client import RequestFactory
from django.conf import settings
from django.core.urlresolvers import reverse
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory
from edxmako.middleware import MakoMiddleware
from opaque_keys.edx.locations import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory
import courseware.views as views
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from course_modes.models import CourseMode
import shoppingcart
from util.tests.test_date_utils import fake_ugettext, fake_pgettext
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestJumpTo(TestCase):
"""
Check the jumpto link for a course.
"""
def setUp(self):
# Use toy course from XML
self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
def test_jumpto_invalid_location(self):
location = self.course_key.make_usage_key(None, 'NoSuchPlace')
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
def test_jumpto_from_chapter(self):
location = self.course_key.make_usage_key('chapter', 'Overview')
jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id(self):
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), 'Overview')
expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
response = self.client.get(jumpto_url)
self.assertRedirects(response, expected, status_code=302, target_status_code=302)
def test_jumpto_id_invalid_location(self):
location = Location('edX', 'toy', 'NoSuchPlace', None, None, None)
jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
response = self.client.get(jumpto_url)
self.assertEqual(response.status_code, 404)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ViewsTestCase(TestCase):
"""
Tests for views.py methods.
"""
def setUp(self):
self.course = CourseFactory()
self.chapter = ItemFactory(category='chapter', parent_location=self.course.location) # pylint: disable=no-member
self.section = ItemFactory(category='sequential', parent_location=self.chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
self.vertical = ItemFactory(category='vertical', parent_location=self.section.location)
self.component = ItemFactory(category='problem', parent_location=self.vertical.location)
self.course_key = self.course.id
self.user = User.objects.create(username='dummy', password='123456',
email='[email protected]')
self.date = datetime(2013, 1, 22, tzinfo=UTC)
self.enrollment = CourseEnrollment.enroll(self.user, self.course_key)
self.enrollment.created = self.date
self.enrollment.save()
self.request_factory = RequestFactory()
chapter = 'Overview'
self.chapter_url = '%s/%s/%s' % ('/courses', self.course_key, chapter)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SHOPPING_CART'), "Shopping Cart not enabled in settings")
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
def test_course_about_in_cart(self):
in_cart_span = '<span class="add-to-cart">'
# don't mock this course due to shopping cart existence checking
course = CourseFactory.create(org="new", number="unenrolled", display_name="course")
request = self.request_factory.get(reverse('about_course', args=[course.id.to_deprecated_string()]))
request.user = AnonymousUser()
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# authenticated user with nothing in cart
request.user = self.user
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertNotIn(in_cart_span, response.content)
# now add the course to the cart
cart = shoppingcart.models.Order.get_cart_for_user(self.user)
shoppingcart.models.PaidCourseRegistration.add_to_order(cart, course.id)
response = views.course_about(request, course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
self.assertIn(in_cart_span, response.content)
def test_user_groups(self):
# deprecated function
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertEqual(views.user_groups(mock_user), [])
def test_get_current_child(self):
self.assertIsNone(views.get_current_child(MagicMock()))
mock_xmodule = MagicMock()
mock_xmodule.position = -1
mock_xmodule.get_display_items.return_value = ['one', 'two']
self.assertEqual(views.get_current_child(mock_xmodule), 'one')
mock_xmodule_2 = MagicMock()
mock_xmodule_2.position = 3
mock_xmodule_2.get_display_items.return_value = []
self.assertIsNone(views.get_current_child(mock_xmodule_2))
def test_redirect_to_course_position(self):
mock_module = MagicMock()
mock_module.descriptor.id = 'Underwater Basketweaving'
mock_module.position = 3
mock_module.get_display_items.return_value = []
self.assertRaises(Http404, views.redirect_to_course_position,
mock_module, views.CONTENT_DEPTH)
def test_index_invalid_position(self):
request_url = '/'.join([
'/courses',
self.course.id.to_deprecated_string(),
self.chapter.location.name,
self.section.location.name,
'f'
])
response = self.client.get(request_url)
self.assertEqual(response.status_code, 404)
def test_unicode_handling_in_url(self):
url_parts = [
'/courses',
self.course.id.to_deprecated_string(),
self.chapter.location.name,
self.section.location.name,
'1'
]
for idx, val in enumerate(url_parts):
url_parts_copy = url_parts[:]
url_parts_copy[idx] = val + u'χ'
request_url = '/'.join(url_parts_copy)
response = self.client.get(request_url)
self.assertEqual(response.status_code, 404)
def test_registered_for_course(self):
self.assertFalse(views.registered_for_course('Basketweaving', None))
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertFalse(views.registered_for_course('dummy', mock_user))
mock_course = MagicMock()
mock_course.id = self.course_key
self.assertTrue(views.registered_for_course(mock_course, self.user))
def test_jump_to_invalid(self):
# TODO add a test for invalid location
# TODO add a test for no data *
request = self.request_factory.get(self.chapter_url)
self.assertRaisesRegexp(Http404, 'Invalid course_key or usage_key', views.jump_to,
request, 'bar', ())
def test_no_end_on_about_page(self):
# Toy course has no course end date or about/end_date blob
self.verify_end_date('edX/toy/TT_2012_Fall')
def test_no_end_about_blob(self):
# test_end has a course end date, no end_date HTML blob
self.verify_end_date("edX/test_end/2012_Fall", "Sep 17, 2015")
def test_about_blob_end_date(self):
# test_about_blob_end_date has both a course end date and an end_date HTML blob.
# HTML blob wins
self.verify_end_date("edX/test_about_blob_end_date/2012_Fall", "Learning never ends")
def verify_end_date(self, course_id, expected_end_text=None):
"""
Visits the about page for `course_id` and tests that both the text "Classes End" and
the specified `expected_end_text` are present on the page.
If `expected_end_text` is None, verifies that the about page *does not* contain the text
"Classes End".
"""
request = self.request_factory.get("foo")
request.user = self.user
# TODO: Remove the dependency on MakoMiddleware (by making the views explicitly supply a RequestContext)
MakoMiddleware().process_request(request)
result = views.course_about(request, course_id)
if expected_end_text is not None:
self.assertContains(result, "Classes End")
self.assertContains(result, expected_end_text)
else:
self.assertNotContains(result, "Classes End")
def test_chat_settings(self):
mock_user = MagicMock()
mock_user.username = "johndoe"
mock_course = MagicMock()
mock_course.id = "a/b/c"
# Stub this out in the case that it's not in the settings
domain = "jabber.edx.org"
settings.JABBER_DOMAIN = domain
chat_settings = views.chat_settings(mock_course, mock_user)
# Test the proper format of all chat settings
self.assertEqual(chat_settings['domain'], domain)
self.assertEqual(chat_settings['room'], "a-b-c_class")
self.assertEqual(chat_settings['username'], "johndoe@%s" % domain)
# TODO: this needs to be changed once we figure out how to
# generate/store a real password.
self.assertEqual(chat_settings['password'], "johndoe@%s" % domain)
def test_course_mktg_about_coming_soon(self):
# we should not be able to find this course
url = reverse('mktg_about_course', kwargs={'course_id': 'no/course/here'})
response = self.client.get(url)
self.assertIn('Coming Soon', response.content)
def test_course_mktg_register(self):
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
url = reverse('mktg_about_course', kwargs={'course_id': self.course_key.to_deprecated_string()})
response = self.client.get(url)
self.assertIn('Register for', response.content)
self.assertNotIn('and choose your student track', response.content)
def test_course_mktg_register_multiple_modes(self):
admin = AdminFactory()
CourseMode.objects.get_or_create(mode_slug='honor',
mode_display_name='Honor Code Certificate',
course_id=self.course_key)
CourseMode.objects.get_or_create(mode_slug='verified',
mode_display_name='Verified Certificate',
course_id=self.course_key)
self.client.login(username=admin.username, password='test')
url = reverse('mktg_about_course', kwargs={'course_id': self.course_key.to_deprecated_string()})
response = self.client.get(url)
self.assertIn('Register for', response.content)
self.assertIn('and choose your student track', response.content)
# clean up course modes
CourseMode.objects.all().delete()
def test_submission_history_accepts_valid_ids(self):
# log into a staff account
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': 'dummy',
'location': self.component.location.to_deprecated_string(),
})
response = self.client.get(url)
# Tests that we do not get an "Invalid x" response when passing correct arguments to view
self.assertFalse('Invalid' in response.content)
def test_submission_history_xss(self):
# log into a staff account
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
# try it with an existing user and a malicious location
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': 'dummy',
'location': '<script>alert("hello");</script>'
})
response = self.client.get(url)
self.assertFalse('<script>' in response.content)
# try it with a malicious user and a non-existent location
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': '<script>alert("hello");</script>',
'location': 'dummy'
})
response = self.client.get(url)
self.assertFalse('<script>' in response.content)
# setting TIME_ZONE_DISPLAYED_FOR_DEADLINES explicitly
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE, TIME_ZONE_DISPLAYED_FOR_DEADLINES="UTC")
class BaseDueDateTests(ModuleStoreTestCase):
"""
Base class that verifies that due dates are rendered correctly on a page
"""
__test__ = False
def get_text(self, course): # pylint: disable=unused-argument
"""Return the rendered text for the page to be verified"""
raise NotImplementedError
def set_up_course(self, **course_kwargs):
"""
Create a stock course with a specific due date.
:param course_kwargs: All kwargs are passed through to the :class:`CourseFactory`
"""
course = CourseFactory(**course_kwargs)
chapter = ItemFactory(category='chapter', parent_location=course.location) # pylint: disable=no-member
section = ItemFactory(category='sequential', parent_location=chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
vertical = ItemFactory(category='vertical', parent_location=section.location)
ItemFactory(category='problem', parent_location=vertical.location)
course = modulestore().get_course(course.id) # pylint: disable=no-member
self.assertIsNotNone(course.get_children()[0].get_children()[0].due)
return course
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
self.time_with_tz = "due Sep 18, 2013 at 11:30 UTC"
self.time_without_tz = "due Sep 18, 2013 at 11:30"
def test_backwards_compatability(self):
# The test course being used has show_timezone = False in the policy file
# (and no due_date_display_format set). This is to test our backwards compatibility--
# in course_module's init method, the date_display_format will be set accordingly to
# remove the timezone.
course = self.set_up_course(due_date_display_format=None, show_timezone=False)
text = self.get_text(course)
self.assertIn(self.time_without_tz, text)
self.assertNotIn(self.time_with_tz, text)
# Test that show_timezone has been cleared (which means you get the default value of True).
self.assertTrue(course.show_timezone)
def test_defaults(self):
course = self.set_up_course()
text = self.get_text(course)
self.assertIn(self.time_with_tz, text)
def test_format_none(self):
# Same for setting the due date to None
course = self.set_up_course(due_date_display_format=None)
text = self.get_text(course)
self.assertIn(self.time_with_tz, text)
def test_format_plain_text(self):
# plain text due date
course = self.set_up_course(due_date_display_format="foobar")
text = self.get_text(course)
self.assertNotIn(self.time_with_tz, text)
self.assertIn("due foobar", text)
def test_format_date(self):
# due date with no time
course = self.set_up_course(due_date_display_format=u"%b %d %y")
text = self.get_text(course)
self.assertNotIn(self.time_with_tz, text)
self.assertIn("due Sep 18 13", text)
def test_format_hidden(self):
# hide due date completely
course = self.set_up_course(due_date_display_format=u"")
text = self.get_text(course)
self.assertNotIn("due ", text)
def test_format_invalid(self):
# improperly formatted due_date_display_format falls through to default
# (value of show_timezone does not matter-- setting to False to make that clear).
course = self.set_up_course(due_date_display_format=u"%%%", show_timezone=False)
text = self.get_text(course)
self.assertNotIn("%%%", text)
self.assertIn(self.time_with_tz, text)
class TestProgressDueDate(BaseDueDateTests):
"""
Test that the progress page displays due dates correctly
"""
__test__ = True
def get_text(self, course):
""" Returns the HTML for the progress page """
return views.progress(self.request, course.id.to_deprecated_string(), self.user.id).content
class TestAccordionDueDate(BaseDueDateTests):
"""
Test that the accordion page displays due dates correctly
"""
__test__ = True
def get_text(self, course):
""" Returns the HTML for the accordion """
return views.render_accordion(
self.request, course, course.get_children()[0].scope_ids.usage_id.to_deprecated_string(), None, None
)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StartDateTests(ModuleStoreTestCase):
"""
Test that start dates are properly localized and displayed on the student
dashboard.
"""
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
def set_up_course(self):
"""
Create a stock course with a specific start date.
"""
course = CourseFactory(start=datetime(2013, 9, 16, 7, 17, 28))
course = modulestore().get_course(course.id) # pylint: disable=no-member
return course
def get_about_text(self, course_key):
"""
Get the text of the /about page for the course.
"""
text = views.course_about(self.request, course_key.to_deprecated_string()).content
return text
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Sep"): "SEPTEMBER",
}))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%Y-%b-%d",
}))
def test_format_localized_in_studio_course(self):
course = self.set_up_course()
text = self.get_about_text(course.id)
# The start date is set in the set_up_course function above.
self.assertIn("2013-SEPTEMBER-16", text)
@patch('util.date_utils.pgettext', fake_pgettext(translations={
("abbreviated month name", "Jul"): "JULY",
}))
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%Y-%b-%d",
}))
def test_format_localized_in_xml_course(self):
text = self.get_about_text(SlashSeparatedCourseKey('edX', 'toy', 'TT_2012_Fall'))
# The start date is set in common/test/data/two_toys/policies/TT_2012_Fall/policy.json
self.assertIn("2015-JULY-17", text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ProgressPageTests(ModuleStoreTestCase):
"""
Tests that verify that the progress page works correctly.
"""
def setUp(self):
self.request_factory = RequestFactory()
self.user = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.user
MakoMiddleware().process_request(self.request)
course = CourseFactory(
start=datetime(2013, 9, 16, 7, 17, 28),
grade_cutoffs={u'çü†øƒƒ': 0.75, 'Pass': 0.5},
)
self.course = modulestore().get_course(course.id) # pylint: disable=no-member
self.chapter = ItemFactory(category='chapter', parent_location=self.course.location) # pylint: disable=no-member
self.section = ItemFactory(category='sequential', parent_location=self.chapter.location)
self.vertical = ItemFactory(category='vertical', parent_location=self.section.location)
def test_pure_ungraded_xblock(self):
ItemFactory(category='acid', parent_location=self.vertical.location)
resp = views.progress(self.request, self.course.id.to_deprecated_string())
self.assertEqual(resp.status_code, 200)
def test_non_asci_grade_cutoffs(self):
resp = views.progress(self.request, self.course.id.to_deprecated_string())
self.assertEqual(resp.status_code, 200)
|
agpl-3.0
|
hsavolai/vmlab
|
src/kiwi/ui/test/common.py
|
1
|
7324
|
#
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2005,2006 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Johan Dahlin <[email protected]>
#
"""
Common routines used by other parts of the ui test framework.
"""
import gobject
import gtk
from gtk import gdk
from kiwi.utils import gsignal
try:
from gtk.gdk import event_handler_set
event_handler_set # pyflakes
except ImportError:
try:
from kiwi._kiwi import event_handler_set
event_handler_set # pyflakes
except ImportError:
event_handler_set = None
class WidgetIntrospecter(gobject.GObject):
gsignal('window-added', object, str, object)
gsignal('window-removed', object, str)
def __init__(self):
gobject.GObject.__init__(self)
self._objects = {}
self._id_to_obj = {} # GdkWindow -> GtkWindow
self._windows = {} # toplevels ?
def _event_handler(self, event):
# Separate method so we can use return inside
self._check_event(event)
gtk.main_do_event(event)
def _check_event(self, event):
if not event.window:
return
window = event.window
event_type = event.type
window_type = window.get_window_type()
try:
widget = window.get_user_data()
except ValueError:
widget = self._id_to_obj.get(window)
if not isinstance(widget, gtk.Window):
return
widget_name = widget.get_name()
if event_type == gdk.MAP:
if window_type != gdk.WINDOW_TOPLEVEL:
# For non-toplevels we only care about those which have a menu
# as the child
child = widget.child
if not child or not isinstance(child, gtk.Menu):
return
# Hack to get all the children of a popup menu in
# the same namespace as the window they were launched in.
parent_menu = child.get_data('parent-menu')
if parent_menu:
main = parent_menu.get_toplevel()
widget_name = main.get_name()
else:
self._window_added(widget, widget_name)
self._id_to_obj[window] = widget
elif (event_type == gdk.DELETE or
(event_type == gdk.WINDOW_STATE and
event.new_window_state == gdk.WINDOW_STATE_WITHDRAWN)):
self._window_removed(widget, widget_name)
def _window_added(self, window, name):
if name in self._windows:
return
self._windows[name] = window
# Toplevel
self.parse_one(window, window)
ns = self._objects[name]
self.emit('window-added', window, name, ns)
def _window_removed(self, window, name):
if not name in self._windows:
# Error?
return
del self._windows[name]
self.emit('window-removed', window, name)
def _add_widget(self, toplevel, widget, name):
toplevel_widgets = self._objects.setdefault(toplevel.get_name(), {})
if name in toplevel_widgets:
return
toplevel_widgets[name] = widget
# Listen to when the widget is removed from the interface, eg when
# ::parent changes to None. At that time remove the widget and all
# the children from the namespace.
def on_widget__notify_parent(widget, pspec, name, widgets,
signal_container):
# Only take action when the widget is removed from a parent
if widget.parent is not None:
return
for child_name, child in widgets.items():
if child.is_ancestor(widget):
del widgets[child_name]
widget.disconnect(signal_container.pop())
signal_container = []
sig_id = widget.connect('notify::parent', on_widget__notify_parent,
name, toplevel_widgets, signal_container)
signal_container.append(sig_id)
# Public API
def register_event_handler(self):
if not event_handler_set:
raise NotImplementedError
event_handler_set(self._event_handler)
def parse_one(self, toplevel, gobj):
if not isinstance(gobj, gobject.GObject):
raise TypeError
gtype = gobj
while True:
name = gobject.type_name(gtype)
func = getattr(self, name, None)
if func:
if func(toplevel, gobj):
break
if gtype == gobject.GObject.__gtype__:
break
gtype = gobject.type_parent(gtype)
#
# Special widget handling
#
def ignore(self, toplevel, gobj):
pass
GtkSeparatorMenuItem = GtkTearoffMenuItem = ignore
def GtkWidget(self, toplevel, widget):
"""
Called when a GtkWidget is about to be traversed
"""
# Workaround to support gtkbuilder and gazpacho
name = gtk.Buildable.get_name(widget)
if not name:
name = widget.get_name()
self._add_widget(toplevel, widget, name)
def GtkContainer(self, toplevel, container):
"""
Called when a GtkContainer is about to be traversed
Parses all the children and listens for new children, which
may be added at a later point.
"""
for child in container.get_children():
self.parse_one(toplevel, child)
def _on_container_add(container, widget):
self.parse_one(toplevel, widget)
container.connect('add', _on_container_add)
def GtkDialog(self, toplevel, dialog):
"""
Called when a GtkDialog is about to be traversed
Just parses the widgets embedded in the dialogs.
"""
self.parse_one(toplevel, dialog.action_area)
self.parse_one(toplevel, dialog.vbox)
def GtkMenuItem(self, toplevel, item):
"""
Called when a GtkMenuItem is about to be traversed
It does some magic to tie a stronger connection between toplevel
menuitems and submenus, which later will be used.
"""
submenu = item.get_submenu()
if submenu:
submenu.set_data('parent-menu', item)
for child_item in submenu.get_children():
child_item.set_data('parent-menu', item)
self.parse_one(toplevel, submenu)
def GtkToolButton(self, toplevel, item):
item.child.set_name(item.get_name())
gobject.type_register(WidgetIntrospecter)
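# Example usage (illustrative sketch; assumes a running GTK main loop and an
# available event-handler backend, otherwise register_event_handler raises
# NotImplementedError):
#
# def on_window_added(wi, window, name, namespace):
#     # namespace maps widget names to widget instances for that toplevel
#     print(name, sorted(namespace.keys()))
#
# wi = WidgetIntrospecter()
# wi.register_event_handler()
# wi.connect('window-added', on_window_added)
# gtk.main()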
|
gpl-3.0
|
oVirt/Node
|
src/ovirt/node/setup/cim/cim_page.py
|
1
|
3665
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# cim_page.py - Copyright (C) 2012 Red Hat, Inc.
# Written by Fabian Deutsch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
from ovirt.node import plugins, valid, ui, utils, exceptions
import cim_model
from ovirt.node.plugins import Changeset
"""
Configure CIM
"""
class Plugin(plugins.NodePlugin):
_model = None
def __init__(self, app):
super(Plugin, self).__init__(app)
self._model = {}
def has_ui(self):
return True
def name(self):
return "CIM"
def rank(self):
return 45
def model(self):
cfg = cim_model.CIM().retrieve()
self.logger.debug(cfg)
model = {"cim.enabled": True if cfg["enabled"] else False,
"cim.password": "",
"cim.password_confirmation": "",
}
return model
def validators(self):
return {"cim.password": valid.Text()}
def ui_content(self):
ws = [ui.Header("header[0]", "CIM"),
ui.Checkbox("cim.enabled", "Enable CIM"),
ui.Divider("divider[0]"),
ui.Header("header[1]", "CIM Password"),
ui.PasswordEntry("cim.password", "Password:"),
ui.PasswordEntry("cim.password_confirmation",
"Confirm Password:"),
]
page = ui.Page("page", ws)
self.widgets.add(ws)
return page
def on_change(self, changes):
if changes.contains_any(["cim.password",
"cim.password_confirmation"]):
self._model.update(changes)
root_pw, root_pw_conf = self._model.get("cim.password", ""), \
self._model.get("cim.password_confirmation", "")
if root_pw != root_pw_conf:
raise exceptions.InvalidData("Passwords must be the same.")
else:
self.widgets["cim.password"].valid(True)
self.widgets["cim.password_confirmation"].valid(True)
def on_merge(self, effective_changes):
self.logger.debug("Saving CIM page")
changes = Changeset(self.pending_changes(False))
effective_model = Changeset(self.model())
effective_model.update(effective_changes)
self.logger.debug("Changes: %s" % changes)
self.logger.debug("Effective Model: %s" % effective_model)
cim_keys = ["cim.password_confirmation", "cim.enabled"]
txs = utils.Transaction("Updating CIM configuration")
if changes.contains_any(cim_keys):
is_enabled = effective_model["cim.enabled"]
pw = effective_model["cim.password_confirmation"]
model = cim_model.CIM()
model.update(is_enabled)
txs += model.transaction(cim_password=pw)
progress_dialog = ui.TransactionProgressDialog("dialog.txs", txs, self)
progress_dialog.run()
|
gpl-2.0
|
citrix-openstack-build/python-ceilometerclient
|
ceilometerclient/tests/v1/test_projects.py
|
4
|
1921
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometerclient.tests import utils
import ceilometerclient.v1.meters
fixtures = {
'/v1/projects': {
'GET': (
{},
{'projects': [
'a',
'b',
]},
),
},
'/v1/sources/source_b/projects': {
'GET': (
{},
{'projects': ['b']},
),
},
}
class ProjectManagerTest(utils.BaseTestCase):
def setUp(self):
super(ProjectManagerTest, self).setUp()
self.api = utils.FakeAPI(fixtures)
self.mgr = ceilometerclient.v1.meters.ProjectManager(self.api)
def test_list_all(self):
projects = list(self.mgr.list())
expect = [
('GET', '/v1/projects', {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(len(projects), 2)
self.assertEqual(projects[0].project_id, 'a')
self.assertEqual(projects[1].project_id, 'b')
def test_list_by_source(self):
projects = list(self.mgr.list(source='source_b'))
expect = [
('GET', '/v1/sources/source_b/projects', {}, None),
]
self.assertEqual(self.api.calls, expect)
self.assertEqual(len(projects), 1)
self.assertEqual(projects[0].project_id, 'b')
|
apache-2.0
|
evildmp/Arkestra
|
vacancies_and_studentships/managers.py
|
2
|
1365
|
from django.db import models
from django.db.models import Q
from django.conf import settings
from datetime import datetime
import operator
from arkestra_utilities.managers import ArkestraGenericModelManager
from arkestra_utilities.settings import MULTIPLE_ENTITY_MODE
# class ItemManager(ArkestraGenericModelManager):
# def get_items(self, instance):
# """
# returns forthcoming_items, previous_items, series_items
# """
#
# # most likely, we're getting items related to an entity
# if MULTIPLE_ENTITY_MODE and instance.entity:
# all_items = self.model.objects.filter(
# Q(hosted_by__in=instance.entity.get_descendants(include_self = True)) | \
# Q(publish_to=instance.entity)).distinct().order_by('-date')
# else:
# all_items = self.model.objects.all().order_by('-date')
#
# instance.forthcoming_items = all_items.filter(date__gte = datetime.now())
# instance.previous_items = all_items.exclude(date__gte = datetime.now())
#
# if instance.view == "archive":
# instance.items = list(instance.previous_items)
#
# else:
# instance.items = list(instance.forthcoming_items)
#
# return instance.items
#
# class StudentshipManager(ItemManager):
# pass
#
# class VacancyManager(ItemManager):
# pass
|
bsd-2-clause
|
oliver-sanders/cylc
|
cylc/flow/scripts/cylc_poll.py
|
1
|
2219
|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc [control] poll [OPTIONS] ARGS
Poll (query) task jobs to verify and update their statuses.
cylc poll REG - poll all active tasks
cylc poll REG TASK_GLOB ... - poll multiple active tasks or families
"""
import sys
if '--use-ssh' in sys.argv[1:]:
sys.argv.remove('--use-ssh')
from cylc.flow.remote import remrun
if remrun():
sys.exit(0)
from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.network.client import SuiteRuntimeClient
from cylc.flow.terminal import prompt, cli_function
def get_option_parser():
parser = COP(
__doc__, comms=True, multitask=True,
argdoc=[
('REG', 'Suite name'),
('[TASK_GLOB ...]', 'Task matching patterns')])
parser.add_option(
"-s", "--succeeded", help="Allow polling of succeeded tasks.",
action="store_true", default=False, dest="poll_succ")
return parser
@cli_function(get_option_parser)
def main(parser, options, suite, *task_globs):
if task_globs:
prompt('Poll task %s in %s' % (task_globs, suite), options.force)
else:
prompt('Poll ALL tasks in %s' % (suite), options.force)
pclient = SuiteRuntimeClient(
suite, options.owner, options.host, options.port,
options.comms_timeout)
pclient(
'poll_tasks',
{'tasks': task_globs, 'poll_succeeded': options.poll_succ}
)
if __name__ == "__main__":
main()
|
gpl-3.0
|
bjoernricks/python-quilt
|
quilt/db.py
|
1
|
10535
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 - 2017 Björn Ricks <[email protected]>
#
# See LICENSE coming with the source of python-quilt for details.
from __future__ import print_function
import getopt
import os.path
import six
import sys
from quilt.error import QuiltError, UnknownPatch
from quilt.patch import Patch
from quilt.utils import _encode_str
DB_VERSION = 2
class DBError(QuiltError):
pass
class PatchLine(object):
""" Represents a line in a series files """
def __init__(self, patch):
""" patch can be either a string or a Patch object """
self.comment = ""
self.patch = None
self.line = ""
if isinstance(patch, six.string_types):
self._parse_line(patch)
elif isinstance(patch, Patch):
self.patch = patch
self.line = patch.get_name()
def _parse_line(self, line):
line = line.rstrip("\r\n")
self.line = line
if line.rstrip().startswith("#"):
self.comment = line
return
if not line.strip():
# empty line
return
if "#" in line:
patchline, self.comment = line.split("#", 1)
else:
patchline = line
patchline = patchline.strip()
if not patchline:
return
patch_args = None
strip = 1
reverse = False
if " " in patchline:
patch_name, patch_args = patchline.split(" ", 1)
else:
patch_name = patchline
if patch_args:
patch_args = patch_args.split()
try:
opts, args = getopt.getopt(patch_args, "p:R", ["strip=",
"reverse"])
for o, a in opts:
if o in ["-p", "--strip"]:
strip = a
elif o in ["-R", "--reverse"]:
reverse = True
except getopt.GetoptError as err:
print(err, file=sys.stderr)
self.patch = Patch(patch_name, strip, reverse)
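# Illustrative example of the parsing above (a sketch, not exhaustive): the
# series line "fix-build.patch -p0 -R # needs rework" yields
# comment=" needs rework" and patch=Patch("fix-build.patch", "0", True),
# while a line that starts with "#" is kept as a pure comment line.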
def get_patch(self):
return self.patch
def get_comment(self):
return self.comment
def set_comment(self, comment):
self.comment = comment
def __str__(self):
return self.line
class PatchSeries(object):
def __init__(self, dirname, filename):
self.dirname = dirname
self.filename = filename
self.series_file = os.path.join(dirname, filename)
self.read()
def _check_patch(self, patch):
if not self.is_patch(patch):
raise UnknownPatch(self, patch)
def exists(self):
""" Returns True if series file exists """
return os.path.exists(self.series_file)
def read(self):
""" Reads all patches from the series file """
self.patchlines = []
self.patch2line = dict()
if self.exists():
with open(self.series_file, "r") as f:
for line in f:
self.add_patch(line)
def save(self):
""" Saves current patches list in the series file """
with open(self.series_file, "wb") as f:
for patchline in self.patchlines:
f.write(_encode_str(str(patchline)))
f.write(b"\n")
def add_patch(self, patch):
""" Add a patch to the patches list """
patchline = PatchLine(patch)
patch = patchline.get_patch()
if patch:
self.patch2line[patch] = patchline
self.patchlines.append(patchline)
def _add_patches(self, patches):
for patch_name in patches:
self.add_patch(patch_name)
def insert_patches(self, patches):
""" Insert list of patches at the front of the curent patches list """
patchlines = []
for patch_name in patches:
patchline = PatchLine(patch_name)
patch = patchline.get_patch()
if patch:
self.patch2line[patch] = patchline
patchlines.append(patchline)
patchlines.extend(self.patchlines)
self.patchlines = patchlines
def add_patches(self, patches, after=None):
""" Add a list of patches to the patches list """
if after is None:
self.insert_patches(patches)
else:
self._check_patch(after)
patchlines = self._patchlines_before(after)
patchlines.append(self.patch2line[after])
for patch in patches:
patchline = PatchLine(patch)
patchlines.append(patchline)
self.patch2line[patchline.get_patch()] = patchline
patchlines.extend(self._patchlines_after(after))
self.patchlines = patchlines
def remove_patch(self, patch):
""" Remove a patch from the patches list """
self._check_patch(patch)
patchline = self.patch2line[patch]
del self.patch2line[patch]
self.patchlines.remove(patchline)
def top_patch(self):
""" Returns the last patch from the patches list or None if the list
is empty """
patches = self.patches()
if not patches:
return None
return patches[-1]
def first_patch(self):
""" Returns the first patch from the patches list or None if the list
is empty """
patches = self.patches()
if not patches:
return None
return patches[0]
def patches(self):
""" Returns the list of patches """
return [line.get_patch() for line in self.patchlines if
line.get_patch()]
def _patchlines_after(self, patch):
self._check_patch(patch)
patchline = self.patch2line[patch]
index = self.patchlines.index(patchline) + 1
if index >= len(self.patchlines):
return []
return self.patchlines[index:]
def _patchlines_before(self, patch):
self._check_patch(patch)
patchline = self.patch2line[patch]
index = self.patchlines.index(patchline)
return self.patchlines[:index]
def _patchlines_until(self, patch):
self._check_patch(patch)
patchline = self.patch2line[patch]
index = self.patchlines.index(patchline) + 1
return self.patchlines[:index]
def patches_after(self, patch):
""" Returns a list of patches after patch from the patches list """
return [line.get_patch() for line in self._patchlines_after(patch) if
line.get_patch()]
def patch_after(self, patch):
""" Returns the patch followed by patch from the patches list or None if
no patch after can be found.
"""
patches = self.patches_after(patch)
if patches:
return patches[0]
return None
def patches_before(self, patch):
""" Returns a list of patches before patch from the patches list """
return [line.get_patch() for line in self._patchlines_before(patch)
if line.get_patch()]
def patch_before(self, patch):
""" Returns the patch before patch from the patches list or None if no
patch before can be found.
"""
patches = self.patches_before(patch)
if patches:
return patches[-1]
return None
def patches_until(self, patch):
""" Returns a list of patches before patch from the patches list
including the provided patch
"""
return [line.get_patch() for line in self._patchlines_until(patch) if
line.get_patch()]
def is_patch(self, patch):
""" Returns True if patch is in the list of patches. Otherwise it
returns False.
"""
return patch in self.patch2line
def is_empty(self):
""" Returns true if no patch is in the series
"""
return len(self.patch2line) == 0
def replace(self, old_patch, new_patch):
""" Replace old_patch with new_patch
The method only replaces the patch and doesn't change any comments.
"""
self._check_patch(old_patch)
old_patchline = self.patch2line[old_patch]
index = self.patchlines.index(old_patchline)
self.patchlines.pop(index)
new_patchline = PatchLine(new_patch)
new_patchline.set_comment(old_patchline.get_comment())
self.patchlines.insert(index, new_patchline)
del self.patch2line[old_patch]
self.patch2line[new_patch] = new_patchline
class Db(PatchSeries):
""" Represents the "Database" of quilt which contains the list of current
applied patches
"""
def __init__(self, dirname):
self.version_file = os.path.join(dirname, ".version")
if os.path.exists(self.version_file):
self.check_version(self.version_file)
super(Db, self).__init__(dirname, "applied-patches")
def _create_version(self, version_file):
with open(version_file, "w") as f:
f.write(str(DB_VERSION))
def create(self):
""" Creates the dirname and inserts a .version file """
if not os.path.exists(self.dirname):
os.makedirs(self.dirname)
self._create_version(self.version_file)
def save(self):
""" Create version file and save applied patches """
self.create()
super(Db, self).save()
def applied_patches(self):
""" Lists all applied patches """
return self.patches()
def check_version(self, version_file):
""" Checks if the .version file in dirname has the correct supported
version number """
# The file contains a version number as a decimal integer, optionally
# followed by a newline
with open(version_file, "r") as f:
version = f.read(10)
version = version.rstrip("\r\n")
if len(version) >= 10 or version != str(DB_VERSION):
raise DBError("The quilt meta-data version of %s is not supported "
"by python-quilt. python-quilt only supports "
"version %s." % (version, DB_VERSION))
class Series(PatchSeries):
""" Represents the series file of quilt which contains the patches to be
applied
"""
def __init__(self, dirname):
super(Series, self).__init__(dirname, "series")
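# Example usage (illustrative sketch; assumes a quilt-style layout with a
# "patches/series" file and a ".pc" state directory):
#
# series = Series("patches")          # patches still to be applied
# db = Db(".pc")                      # currently applied patches (checks .pc/.version)
# print([p.get_name() for p in series.patches()])
# print([p.get_name() for p in db.applied_patches()])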
|
mit
|
abought/osf.io
|
website/addons/badges/tests/utils.py
|
59
|
1215
|
import string
import mock
import random
from website.addons.badges.model import Badge
def create_mock_badger(mock_badger):
#mock_badger.configured = True
mock_badger.name = 'Honey'
mock_badger.email = '[email protected]'
for _ in range(4):
create_mock_badge(mock_badger)
mock_badger.save()
mock_badger.reload()
return mock_badger
@mock.patch('website.addons.badges.model.badges.acquire_badge_image')
def create_mock_badge(issuer, mock_img, badge_data=None):
mock_img.return_value = 'temp.png'
if not badge_data:
badge_data = create_badge_dict()
return Badge.create(issuer, badge_data)
def create_badge_dict():
return {
'badgeName': ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(4)),
'description': 'Just doesn\'t '.join(random.choice(string.ascii_letters + string.digits) for _ in range(6)),
'imageurl': 'Image',
'criteria': 'Don\'t give a '.join(random.choice(string.ascii_letters + string.digits) for _ in range(4))
}
def get_garbage(length=10):
return '<script><a><b><img>'.join(random.choice(string.ascii_letters + string.digits) for _ in range(length)).join('</script></b>')
|
apache-2.0
|
OpenXT/sync-cli
|
sync_cli/show_disk.py
|
1
|
1803
|
#
# Copyright (c) 2012 Citrix Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import cx_Oracle
import arguments
import connect
import output
def add_subparser(subparsers):
description = """Show information about a disk."""
parser = subparsers.add_parser("show-disk",
help="show disk",
description=description)
arguments.add_output_args(parser)
parser.add_argument("disk",
metavar="DISK_UUID",
action=arguments.StoreSingleNonEmptyValue,
help="disk uuid")
parser.set_defaults(func=_run)
def _run(args, config):
connection = connect.connect(args, config)
cursor = connection.cursor()
disk = cursor.callfunc("sync_admin.get_disk",
cx_Oracle.CURSOR,
keywordParameters={
"disk_uuid": args.disk})
if args.output == output.QUIET:
fields = ["disk_uuid"]
else:
fields = None
output.print_cursor(disk, fields, args.output, single_row=True)
|
gpl-2.0
|
cbries/utilities
|
weatherpi/thirdparty/Adafruit/Adafruit_I2C/Adafruit_I2C.py
|
116
|
5501
|
#!/usr/bin/python
import re
import smbus
# ===========================================================================
# Adafruit_I2C Class
# ===========================================================================
class Adafruit_I2C(object):
@staticmethod
def getPiRevision():
"Gets the version number of the Raspberry Pi board"
# Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
try:
with open('/proc/cpuinfo', 'r') as infile:
for line in infile:
# Match a line of the form "Revision : 0002" while ignoring extra
# info in front of the revision (like 1000 when the Pi was over-volted).
match = re.match('Revision\s+:\s+.*(\w{4})$', line)
if match and match.group(1) in ['0000', '0002', '0003']:
# Return revision 1 if revision ends with 0000, 0002 or 0003.
return 1
elif match:
# Assume revision 2 if revision ends with any other 4 chars.
return 2
# Couldn't find the revision, assume revision 0 like older code for compatibility.
return 0
except:
return 0
@staticmethod
def getPiI2CBusNumber():
# Gets the I2C bus number /dev/i2c#
return 1 if Adafruit_I2C.getPiRevision() > 1 else 0
def __init__(self, address, busnum=-1, debug=False):
self.address = address
# By default, the correct I2C bus is auto-detected using /proc/cpuinfo
# Alternatively, you can hard-code the bus version below:
# self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
# self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
self.bus = smbus.SMBus(busnum if busnum >= 0 else Adafruit_I2C.getPiI2CBusNumber())
self.debug = debug
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or long (32-bit) value"
# Courtesy Vishal Sapre
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
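# Illustrative note on the reversal above: for the 16-bit value 0x1234,
# byteCount is 2 and the loop yields 0x3412; for the 32-bit value 0x12345678
# it yields 0x78563412.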
def errMsg(self):
print "Error accessing 0x%02X: Check your I2C address" % self.address
return -1
def write8(self, reg, value):
"Writes an 8-bit value to the specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
if self.debug:
print "I2C: Wrote 0x%02X to register 0x%02X" % (value, reg)
except IOError, err:
return self.errMsg()
def write16(self, reg, value):
"Writes a 16-bit value to the specified register/address pair"
try:
self.bus.write_word_data(self.address, reg, value)
if self.debug:
print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" %
(value, reg, reg+1))
except IOError, err:
return self.errMsg()
def writeRaw8(self, value):
"Writes an 8-bit value on the bus"
try:
self.bus.write_byte(self.address, value)
if self.debug:
print "I2C: Wrote 0x%02X" % value
except IOError, err:
return self.errMsg()
def writeList(self, reg, list):
"Writes an array of bytes using I2C format"
try:
if self.debug:
print "I2C: Writing list to register 0x%02X:" % reg
print list
self.bus.write_i2c_block_data(self.address, reg, list)
except IOError, err:
return self.errMsg()
def readList(self, reg, length):
"Read a list of bytes from the I2C device"
try:
results = self.bus.read_i2c_block_data(self.address, reg, length)
if self.debug:
print ("I2C: Device 0x%02X returned the following from reg 0x%02X" %
(self.address, reg))
print results
return results
except IOError, err:
return self.errMsg()
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readS8(self, reg):
"Reads a signed byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127: result -= 256
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readU16(self, reg, little_endian=True):
"Reads an unsigned 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
if (self.debug):
print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
return result
except IOError, err:
return self.errMsg()
def readS16(self, reg, little_endian=True):
"Reads a signed 16-bit value from the I2C device"
try:
result = self.readU16(reg,little_endian)
if result > 32767: result -= 65536
return result
except IOError, err:
return self.errMsg()
if __name__ == '__main__':
try:
bus = Adafruit_I2C(address=0)
print "Default I2C bus is accessible"
except:
print "Error accessing default I2C bus"
|
mit
|
figarocms/thumbor
|
thumbor/detectors/queued_detector/__init__.py
|
6
|
1501
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from redis import Redis, RedisError
from remotecv.unique_queue import UniqueQueue
from thumbor.detectors import BaseDetector
from thumbor.utils import logger
class QueuedDetector(BaseDetector):
queue = None
detection_type = 'all'
def detect(self, callback):
self.context.request.prevent_result_storage = True
try:
if not QueuedDetector.queue:
redis = Redis(host=self.context.config.REDIS_QUEUE_SERVER_HOST,
port=self.context.config.REDIS_QUEUE_SERVER_PORT,
db=self.context.config.REDIS_QUEUE_SERVER_DB,
password=self.context.config.REDIS_QUEUE_SERVER_PASSWORD)
QueuedDetector.queue = UniqueQueue(server=redis)
QueuedDetector.queue.enqueue_unique_from_string(
'remotecv.pyres_tasks.DetectTask', 'Detect',
args=[self.detection_type, self.context.request.image_url],
key=self.context.request.image_url
)
except RedisError:
self.context.request.detection_error = True
QueuedDetector.queue = None
logger.exception('Redis Error')
finally:
callback([])
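# Illustrative note on the flow above (a sketch of the observed behaviour, not
# an API guarantee): detect() never blocks on detection results; it enqueues a
# remotecv DetectTask keyed by the image URL and immediately calls callback([]).
# On RedisError the shared class-level queue is reset and
# context.request.detection_error is set so the request can fall back.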
|
mit
|
WalrusCow/wmcd.ca
|
python/blog/posts.py
|
1
|
1723
|
import functools
import hashlib
from datetime import datetime
import blog.db as db
class Post():
_FIELDS = ('body', 'title', 'author')
_HASH_FIELDS = _FIELDS + ('timestamp',)
_ALL_FIELDS = _HASH_FIELDS + ('id',)
def __init__(self, kvp):
for field in Post._FIELDS:
if field not in kvp:
self.valid = False
return
setattr(self, field, kvp[field])
self.valid = True
self.timestamp = kvp.get('timestamp', datetime.now())
self.id = kvp.get('id', self._hash()[:16])
def __iter__(self):
for field in Post._ALL_FIELDS:
yield field, getattr(self, field)
def _hash(self):
''' Compute a hash of the current object. '''
hasher = hashlib.sha256()
for field in Post._HASH_FIELDS:
hasher.update(str(getattr(self, field)).encode())
return hasher.hexdigest()
def toPost(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
postData = func(*args, **kwargs)
return Post(postData) if postData is not None else None
return wrapper
@toPost
def previousPost(post):
''' Return the post immediately before this one. '''
return db.posts.find_one({'timestamp': {'$lt': post.timestamp}},
sort=[('timestamp', -1)])
@toPost
def nextPost(post):
''' Return the post immediately after this one, if any. '''
return db.posts.find_one({'timestamp': {'$gt': post.timestamp}},
sort=[('timestamp', 1)])
@toPost
def retrieve(id):
''' Retrieve post with given id. '''
return db.posts.find_one({'id': id})
def create(post):
return db.posts.insert(dict(post))
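# Example usage (illustrative sketch; assumes a MongoDB collection behind
# blog.db.posts): a post id is the first 16 hex chars of a SHA-256 over
# body, title, author and timestamp.
#
# post = Post({'body': 'Hello', 'title': 'First post', 'author': 'walrus'})
# if post.valid:
#     create(post)             # stores dict(post)
#     same = retrieve(post.id)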
|
mit
|
lyynocs/magento-connector-v8
|
magentoerpconnect/tests/test_related_action.py
|
1
|
3948
|
# -*- coding: utf-8 -*-
import mock
import openerp
import openerp.tests.common as common
from openerp.addons.connector.queue.job import (
Job,
OpenERPJobStorage,
)
from openerp.addons.connector.session import (
ConnectorSession)
from .common import mock_api
from .test_data import magento_base_responses
from ..unit.import_synchronizer import import_batch, import_record
from ..unit.export_synchronizer import export_record
class test_related_action_storage(common.TransactionCase):
""" Test related actions on stored jobs """
def setUp(self):
super(test_related_action_storage, self).setUp()
cr, uid = self.cr, self.uid
backend_model = self.registry('magento.backend')
self.session = ConnectorSession(cr, uid)
self.session.context['__test_no_commit'] = True
warehouse_id = self.ref('stock.warehouse0')
backend_id = backend_model.create(
cr,
uid,
{'name': 'Test Magento',
'version': '1.7',
'location': 'http://anyurl',
'username': 'username',
'warehouse_id': warehouse_id,
'password': '42'})
self.backend = backend_model.browse(cr, uid, backend_id)
# import the base information
with mock_api(magento_base_responses):
import_batch(self.session, 'magento.website', backend_id)
import_batch(self.session, 'magento.store', backend_id)
import_batch(self.session, 'magento.storeview', backend_id)
self.MagentoProduct = self.registry('magento.product.product')
self.QueueJob = self.registry('queue.job')
def test_unwrap_binding(self):
""" Open a related action opening an unwrapped binding """
cr, uid = self.cr, self.uid
product_id = self.ref('product.product_product_7')
magento_product_id = self.MagentoProduct.create(
cr, uid,
{'openerp_id': product_id,
'backend_id': self.backend.id})
stored = self._create_job(export_record, 'magento.product.product',
magento_product_id)
expected = {
'name': mock.ANY,
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_id': product_id,
'res_model': 'product.product',
}
self.assertEquals(stored.open_related_action(), expected)
def _create_job(self, func, *args):
cr, uid = self.cr, self.uid
job = Job(func=func, args=args)
storage = OpenERPJobStorage(self.session)
storage.store(job)
stored_ids = self.QueueJob.search(self.cr, self.uid,
[('uuid', '=', job.uuid)])
self.assertEqual(len(stored_ids), 1)
return self.QueueJob.browse(cr, uid, stored_ids[0])
def test_link(self):
""" Open a related action opening an url on Magento """
self.backend.write({'admin_location': 'http://www.example.com/admin'})
self.backend.refresh()
stored = self._create_job(import_record, 'magento.product.product',
self.backend.id, 123456)
url = 'http://www.example.com/admin/catalog_product/edit/id/123456'
expected = {
'type': 'ir.actions.act_url',
'target': 'new',
'url': url,
}
self.assertEquals(stored.open_related_action(), expected)
def test_link_no_location(self):
""" Related action opening an url, admin location is not configured """
self.backend.write({'admin_location': False})
self.backend.refresh()
stored = self._create_job(import_record, 'magento.product.product',
self.backend.id, 123456)
with self.assertRaises(openerp.osv.orm.except_orm):
stored.open_related_action()
|
agpl-3.0
|
linjoahow/W16_test1
|
static/Brython3.1.1-20150328-091302/Lib/browser/markdown.py
|
623
|
13060
|
# -*- coding: utf-8 -*-
try:
import _jsre as re
except:
import re
import random
import time
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'0123456789'
class URL:
def __init__(self,src):
elts = src.split(maxsplit=1)
self.href = elts[0]
self.alt = ''
if len(elts)==2:
alt = elts[1]
if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1]
elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1]
elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1]
class CodeBlock:
def __init__(self,line):
self.lines = [line]
if line.startswith("```") and len(line)>3:
self.info = line[3:]
else:
self.info = None
def to_html(self):
if self.lines[0].startswith("`"):
self.lines.pop(0)
res = escape('\n'.join(self.lines))
res = unmark(res)
_class = self.info or "marked"
res = '<pre class="%s">%s</pre>\n' %(_class, res)
return res,[]
class HtmlBlock:
def __init__(self, src):
self.src = src
def to_html(self):
return self.src
class Marked:
def __init__(self, line=''):
self.line = line
self.children = []
def to_html(self):
return apply_markdown(self.line)
# get references
refs = {}
ref_pattern = r"^\[(.*)\]:\s+(.*)"
def mark(src):
global refs
t0 = time.time()
refs = {}
# split source in sections
# sections can be :
# - a block-level HTML element (markdown syntax will not be processed)
# - a script
# - a span-level HTML tag (markdown syntax will be processed)
# - a code block
# normalise line feeds
src = src.replace('\r\n','\n')
# lines followed by dashes
src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)
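# e.g. (illustrative) "Title\n=====\n" becomes "\n# Title\n" and
# "Subtitle\n-----\n" becomes "\n## Subtitle\n"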
lines = src.split('\n')+['']
i = bq = 0
ul = ol = 0
while i<len(lines):
# enclose lines starting by > in a blockquote
if lines[i].startswith('>'):
nb = 1
while nb<len(lines[i]) and lines[i][nb]=='>':
nb += 1
lines[i] = lines[i][nb:]
if nb>bq:
lines.insert(i,'<blockquote>'*(nb-bq))
i += 1
bq = nb
elif nb<bq:
lines.insert(i,'</blockquote>'*(bq-nb))
i += 1
bq = nb
elif bq>0:
lines.insert(i,'</blockquote>'*bq)
i += 1
bq = 0
# unordered lists
if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
and len(lines[i].lstrip())>1 \
and lines[i].lstrip()[1]==' ' \
and (i==0 or ul or not lines[i-1].strip()):
# line indentation indicates nesting level
nb = 1+len(lines[i])-len(lines[i].lstrip())
lines[i] = '<li>'+lines[i][nb:]
if nb>ul:
lines.insert(i,'<ul>'*(nb-ul))
i += 1
elif nb<ul:
lines.insert(i,'</ul>'*(ul-nb))
i += 1
ul = nb
elif ul and not lines[i].strip():
if i<len(lines)-1 and lines[i+1].strip() \
and not lines[i+1].startswith(' '):
nline = lines[i+1].lstrip()
if nline[0] in '-+*' and len(nline)>1 and nline[1]==' ':
pass
else:
lines.insert(i,'</ul>'*ul)
i += 1
ul = 0
# ordered lists
mo = re.search(r'^(\d+\.)',lines[i])
if mo:
if not ol:
lines.insert(i,'<ol>')
i += 1
lines[i] = '<li>'+lines[i][len(mo.groups()[0]):]
ol = 1
elif ol and not lines[i].strip() and i<len(lines)-1 \
and not lines[i+1].startswith(' ') \
and not re.search(r'^(\d+\.)',lines[i+1]):
lines.insert(i,'</ol>')
i += 1
ol = 0
i += 1
if ul:
lines.append('</ul>'*ul)
if ol:
lines.append('</ol>'*ol)
if bq:
lines.append('</blockquote>'*bq)
t1 = time.time()
#print('part 1', t1-t0)
sections = []
scripts = []
section = Marked()
i = 0
while i<len(lines):
line = lines[i]
        if line.strip() and line.startswith('    '):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line[4:])
j = i+1
            while j<len(lines) and lines[j].startswith('    '):
section.lines.append(lines[j][4:])
j += 1
sections.append(section)
section = Marked()
i = j
continue
elif line.strip() and line.startswith("```"):
# fenced code blocks à la Github Flavoured Markdown
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line)
j = i+1
while j<len(lines) and not lines[j].startswith("```"):
section.lines.append(lines[j])
j += 1
sections.append(section)
section = Marked()
i = j+1
continue
elif line.lower().startswith('<script'):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
j = i+1
while j<len(lines):
if lines[j].lower().startswith('</script>'):
scripts.append('\n'.join(lines[i+1:j]))
for k in range(i,j+1):
lines[k] = ''
break
j += 1
i = j
continue
        # ATX header
elif line.startswith('#'):
level = 1
line = lines[i]
while level<len(line) and line[level]=='#' and level<=6:
level += 1
if not line[level+1:].strip():
if level==1:
i += 1
continue
else:
lines[i] = '<H%s>%s</H%s>\n' %(level-1,'#',level-1)
else:
lines[i] = '<H%s>%s</H%s>\n' %(level,line[level+1:],level)
else:
mo = re.search(ref_pattern,line)
if mo is not None:
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
key = mo.groups()[0]
value = URL(mo.groups()[1])
refs[key.lower()] = value
else:
if not line.strip():
line = '<p></p>'
if section.line:
section.line += '\n'
section.line += line
i += 1
t2 = time.time()
#print('section 2', t2-t1)
if isinstance(section,Marked) and section.line:
sections.append(section)
res = ''
for section in sections:
mk,_scripts = section.to_html()
res += mk
scripts += _scripts
#print('end mark', time.time()-t2)
return res,scripts
def escape(czone):
    czone = czone.replace('&','&amp;')
    czone = czone.replace('<','&lt;')
    czone = czone.replace('>','&gt;')
    czone = czone.replace('_','&#95;')
    czone = czone.replace('*','&#42;')
    return czone
def s_escape(mo):
# used in re.sub
czone = mo.string[mo.start():mo.end()]
return escape(czone)
def unmark(code_zone):
    # convert _ to &#95; inside inline code
    code_zone = code_zone.replace('_','&#95;')
    return code_zone
def s_unmark(mo):
    # convert _ to &#95; inside inline code
    code_zone = mo.string[mo.start():mo.end()]
    code_zone = code_zone.replace('_','&#95;')
    return code_zone
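# Illustrative sanity check (an addition, not part of the original Brython
# module): escape() turns HTML and markdown metacharacters into character
# references so that code-block contents render verbatim, while unmark() only
# neutralises "_". Kept under a main guard so importing in the browser is
# unaffected.
if __name__ == '__main__':
    assert escape('a < b & c_*') == 'a &lt; b &amp; c&#95;&#42;'
    assert unmark('foo_bar') == 'foo&#95;bar'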
def apply_markdown(src):
scripts = []
key = None
t0 = time.time()
i = 0
while i<len(src):
if src[i]=='[':
start_a = i+1
while True:
end_a = src.find(']',i)
if end_a == -1:
break
if src[end_a-1]=='\\':
i = end_a+1
else:
break
if end_a>-1 and src[start_a:end_a].find('\n')==-1:
link = src[start_a:end_a]
rest = src[end_a+1:].lstrip()
if rest and rest[0]=='(':
j = 0
while True:
end_href = rest.find(')',j)
if end_href == -1:
break
if rest[end_href-1]=='\\':
j = end_href+1
else:
break
if end_href>-1 and rest[:end_href].find('\n')==-1:
tag = '<a href="'+rest[1:end_href]+'">'+link+'</a>'
src = src[:start_a-1]+tag+rest[end_href+1:]
i = start_a+len(tag)
elif rest and rest[0]=='[':
j = 0
while True:
end_key = rest.find(']',j)
if end_key == -1:
break
if rest[end_key-1]=='\\':
j = end_key+1
else:
break
if end_key>-1 and rest[:end_key].find('\n')==-1:
if not key:
key = link
if key.lower() not in refs:
raise KeyError('unknown reference %s' %key)
url = refs[key.lower()]
                        tag = '<a href="'+url.href+'">'+link+'</a>'
src = src[:start_a-1]+tag+rest[end_key+1:]
i = start_a+len(tag)
i += 1
t1 = time.time()
#print('apply markdown 1', t1-t0)
# before applying the markup with _ and *, isolate HTML tags because
# they can contain these characters
# We replace them temporarily by a random string
rstr = ''.join(random.choice(letters) for i in range(16))
i = 0
state = None
start = -1
data = ''
tags = []
while i<len(src):
if src[i]=='<':
j = i+1
while j<len(src):
if src[j]=='"' or src[j]=="'":
if state==src[j] and src[j-1]!='\\':
state = None
j = start+len(data)+1
data = ''
elif state==None:
state = src[j]
start = j
else:
data += src[j]
elif src[j]=='>' and state is None:
tags.append(src[i:j+1])
src = src[:i]+rstr+src[j+1:]
i += len(rstr)
break
elif state=='"' or state=="'":
data += src[j]
elif src[j]=='\n':
                    # if a sign < is not followed by > on the same line, it
                    # is the "less than" sign
                    src = src[:i]+'&lt;'+src[i+1:]
                    j=i+4
break
j += 1
elif src[i]=='`' and i>0 and src[i-1]!='\\':
# ignore the content of inline code
j = i+1
while j<len(src):
if src[j]=='`' and src[j-1]!='\\':
break
j += 1
i = j
i += 1
t2 = time.time()
#print('apply markdown 2', len(src), t2-t1)
# escape "<", ">", "&" and "_" in inline code
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,s_escape,src)
# replace escaped ` _ * by HTML characters
    src = src.replace(r'\`','&#96;')
    src = src.replace(r'\_','&#95;')
    src = src.replace(r'\*','&#42;')
# emphasis
strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')]
for tag,strong_pattern in strong_patterns:
src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src)
em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')]
for tag,em_pattern in em_patterns:
src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src)
# inline code
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,r'<code>\1</code>',src)
# restore tags
while True:
pos = src.rfind(rstr)
if pos==-1:
break
repl = tags.pop()
src = src[:pos]+repl+src[pos+len(rstr):]
src = '<p>'+src+'</p>'
t3 = time.time()
#print('apply markdown 3', t3-t2)
return src,scripts
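# Minimal usage sketch (an illustrative addition, not part of the original
# module): the usual entry point is mark(), which returns the generated HTML
# together with the bodies of any <script> sections found in the source.
if __name__ == '__main__':
    sample = (
        "# Title\n"
        "\n"
        "Some *emphasis*, a [link](http://example.com) and a code block:\n"
        "\n"
        "    print('indented code')\n"
    )
    html, scripts = mark(sample)
    print(html)      # e.g. an <H1> header, <EM> emphasis, an <a> link and a <pre> block
    print(scripts)   # [] when the source contains no <script> sections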
|
gpl-3.0
|
gorjuce/odoo
|
addons/hr_timesheet_invoice/report/__init__.py
|
433
|
1136
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_profit
import report_analytic
import hr_timesheet_invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
rpp0/gr-lora
|
python/qa_receiver.py
|
2
|
36952
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# GNU GENERAL PUBLIC LICENSE
# Version 3, 29 June 2007
#
# Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
# Everyone is permitted to copy and distribute verbatim copies
# of this license document, but changing it is not allowed.
#
# Preamble
#
# The GNU General Public License is a free, copyleft license for
# software and other kinds of works.
#
# The licenses for most software and other practical works are designed
# to take away your freedom to share and change the works. By contrast,
# the GNU General Public License is intended to guarantee your freedom to
# share and change all versions of a program--to make sure it remains free
# software for all its users. We, the Free Software Foundation, use the
# GNU General Public License for most of our software; it applies also to
# any other work released this way by its authors. You can apply it to
# your programs, too.
#
# When we speak of free software, we are referring to freedom, not
# price. Our General Public Licenses are designed to make sure that you
# have the freedom to distribute copies of free software (and charge for
# them if you wish), that you receive source code or can get it if you
# want it, that you can change the software or use pieces of it in new
# free programs, and that you know you can do these things.
#
# To protect your rights, we need to prevent others from denying you
# these rights or asking you to surrender the rights. Therefore, you have
# certain responsibilities if you distribute copies of the software, or if
# you modify it: responsibilities to respect the freedom of others.
#
# For example, if you distribute copies of such a program, whether
# gratis or for a fee, you must pass on to the recipients the same
# freedoms that you received. You must make sure that they, too, receive
# or can get the source code. And you must show them these terms so they
# know their rights.
#
# Developers that use the GNU GPL protect your rights with two steps:
# (1) assert copyright on the software, and (2) offer you this License
# giving you legal permission to copy, distribute and/or modify it.
#
# For the developers' and authors' protection, the GPL clearly explains
# that there is no warranty for this free software. For both users' and
# authors' sake, the GPL requires that modified versions be marked as
# changed, so that their problems will not be attributed erroneously to
# authors of previous versions.
#
# Some devices are designed to deny users access to install or run
# modified versions of the software inside them, although the manufacturer
# can do so. This is fundamentally incompatible with the aim of
# protecting users' freedom to change the software. The systematic
# pattern of such abuse occurs in the area of products for individuals to
# use, which is precisely where it is most unacceptable. Therefore, we
# have designed this version of the GPL to prohibit the practice for those
# products. If such problems arise substantially in other domains, we
# stand ready to extend this provision to those domains in future versions
# of the GPL, as needed to protect the freedom of users.
#
# Finally, every program is threatened constantly by software patents.
# States should not allow patents to restrict development and use of
# software on general-purpose computers, but in those that do, we wish to
# avoid the special danger that patents applied to a free program could
# make it effectively proprietary. To prevent this, the GPL assures that
# patents cannot be used to render the program non-free.
#
# The precise terms and conditions for copying, distribution and
# modification follow.
#
# TERMS AND CONDITIONS
#
# 0. Definitions.
#
# "This License" refers to version 3 of the GNU General Public License.
#
# "Copyright" also means copyright-like laws that apply to other kinds of
# works, such as semiconductor masks.
#
# "The Program" refers to any copyrightable work licensed under this
# License. Each licensee is addressed as "you". "Licensees" and
# "recipients" may be individuals or organizations.
#
# To "modify" a work means to copy from or adapt all or part of the work
# in a fashion requiring copyright permission, other than the making of an
# exact copy. The resulting work is called a "modified version" of the
# earlier work or a work "based on" the earlier work.
#
# A "covered work" means either the unmodified Program or a work based
# on the Program.
#
# To "propagate" a work means to do anything with it that, without
# permission, would make you directly or secondarily liable for
# infringement under applicable copyright law, except executing it on a
# computer or modifying a private copy. Propagation includes copying,
# distribution (with or without modification), making available to the
# public, and in some countries other activities as well.
#
# To "convey" a work means any kind of propagation that enables other
# parties to make or receive copies. Mere interaction with a user through
# a computer network, with no transfer of a copy, is not conveying.
#
# An interactive user interface displays "Appropriate Legal Notices"
# to the extent that it includes a convenient and prominently visible
# feature that (1) displays an appropriate copyright notice, and (2)
# tells the user that there is no warranty for the work (except to the
# extent that warranties are provided), that licensees may convey the
# work under this License, and how to view a copy of this License. If
# the interface presents a list of user commands or options, such as a
# menu, a prominent item in the list meets this criterion.
#
# 1. Source Code.
#
# The "source code" for a work means the preferred form of the work
# for making modifications to it. "Object code" means any non-source
# form of a work.
#
# A "Standard Interface" means an interface that either is an official
# standard defined by a recognized standards body, or, in the case of
# interfaces specified for a particular programming language, one that
# is widely used among developers working in that language.
#
# The "System Libraries" of an executable work include anything, other
# than the work as a whole, that (a) is included in the normal form of
# packaging a Major Component, but which is not part of that Major
# Component, and (b) serves only to enable use of the work with that
# Major Component, or to implement a Standard Interface for which an
# implementation is available to the public in source code form. A
# "Major Component", in this context, means a major essential component
# (kernel, window system, and so on) of the specific operating system
# (if any) on which the executable work runs, or a compiler used to
# produce the work, or an object code interpreter used to run it.
#
# The "Corresponding Source" for a work in object code form means all
# the source code needed to generate, install, and (for an executable
# work) run the object code and to modify the work, including scripts to
# control those activities. However, it does not include the work's
# System Libraries, or general-purpose tools or generally available free
# programs which are used unmodified in performing those activities but
# which are not part of the work. For example, Corresponding Source
# includes interface definition files associated with source files for
# the work, and the source code for shared libraries and dynamically
# linked subprograms that the work is specifically designed to require,
# such as by intimate data communication or control flow between those
# subprograms and other parts of the work.
#
# The Corresponding Source need not include anything that users
# can regenerate automatically from other parts of the Corresponding
# Source.
#
# The Corresponding Source for a work in source code form is that
# same work.
#
# 2. Basic Permissions.
#
# All rights granted under this License are granted for the term of
# copyright on the Program, and are irrevocable provided the stated
# conditions are met. This License explicitly affirms your unlimited
# permission to run the unmodified Program. The output from running a
# covered work is covered by this License only if the output, given its
# content, constitutes a covered work. This License acknowledges your
# rights of fair use or other equivalent, as provided by copyright law.
#
# You may make, run and propagate covered works that you do not
# convey, without conditions so long as your license otherwise remains
# in force. You may convey covered works to others for the sole purpose
# of having them make modifications exclusively for you, or provide you
# with facilities for running those works, provided that you comply with
# the terms of this License in conveying all material for which you do
# not control copyright. Those thus making or running the covered works
# for you must do so exclusively on your behalf, under your direction
# and control, on terms that prohibit them from making any copies of
# your copyrighted material outside their relationship with you.
#
# Conveying under any other circumstances is permitted solely under
# the conditions stated below. Sublicensing is not allowed; section 10
# makes it unnecessary.
#
# 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
#
# No covered work shall be deemed part of an effective technological
# measure under any applicable law fulfilling obligations under article
# 11 of the WIPO copyright treaty adopted on 20 December 1996, or
# similar laws prohibiting or restricting circumvention of such
# measures.
#
# When you convey a covered work, you waive any legal power to forbid
# circumvention of technological measures to the extent such circumvention
# is effected by exercising rights under this License with respect to
# the covered work, and you disclaim any intention to limit operation or
# modification of the work as a means of enforcing, against the work's
# users, your or third parties' legal rights to forbid circumvention of
# technological measures.
#
# 4. Conveying Verbatim Copies.
#
# You may convey verbatim copies of the Program's source code as you
# receive it, in any medium, provided that you conspicuously and
# appropriately publish on each copy an appropriate copyright notice;
# keep intact all notices stating that this License and any
# non-permissive terms added in accord with section 7 apply to the code;
# keep intact all notices of the absence of any warranty; and give all
# recipients a copy of this License along with the Program.
#
# You may charge any price or no price for each copy that you convey,
# and you may offer support or warranty protection for a fee.
#
# 5. Conveying Modified Source Versions.
#
# You may convey a work based on the Program, or the modifications to
# produce it from the Program, in the form of source code under the
# terms of section 4, provided that you also meet all of these conditions:
#
# a) The work must carry prominent notices stating that you modified
# it, and giving a relevant date.
#
# b) The work must carry prominent notices stating that it is
# released under this License and any conditions added under section
# 7. This requirement modifies the requirement in section 4 to
# "keep intact all notices".
#
# c) You must license the entire work, as a whole, under this
# License to anyone who comes into possession of a copy. This
# License will therefore apply, along with any applicable section 7
# additional terms, to the whole of the work, and all its parts,
# regardless of how they are packaged. This License gives no
# permission to license the work in any other way, but it does not
# invalidate such permission if you have separately received it.
#
# d) If the work has interactive user interfaces, each must display
# Appropriate Legal Notices; however, if the Program has interactive
# interfaces that do not display Appropriate Legal Notices, your
# work need not make them do so.
#
# A compilation of a covered work with other separate and independent
# works, which are not by their nature extensions of the covered work,
# and which are not combined with it such as to form a larger program,
# in or on a volume of a storage or distribution medium, is called an
# "aggregate" if the compilation and its resulting copyright are not
# used to limit the access or legal rights of the compilation's users
# beyond what the individual works permit. Inclusion of a covered work
# in an aggregate does not cause this License to apply to the other
# parts of the aggregate.
#
# 6. Conveying Non-Source Forms.
#
# You may convey a covered work in object code form under the terms
# of sections 4 and 5, provided that you also convey the
# machine-readable Corresponding Source under the terms of this License,
# in one of these ways:
#
# a) Convey the object code in, or embodied in, a physical product
# (including a physical distribution medium), accompanied by the
# Corresponding Source fixed on a durable physical medium
# customarily used for software interchange.
#
# b) Convey the object code in, or embodied in, a physical product
# (including a physical distribution medium), accompanied by a
# written offer, valid for at least three years and valid for as
# long as you offer spare parts or customer support for that product
# model, to give anyone who possesses the object code either (1) a
# copy of the Corresponding Source for all the software in the
# product that is covered by this License, on a durable physical
# medium customarily used for software interchange, for a price no
# more than your reasonable cost of physically performing this
# conveying of source, or (2) access to copy the
# Corresponding Source from a network server at no charge.
#
# c) Convey individual copies of the object code with a copy of the
# written offer to provide the Corresponding Source. This
# alternative is allowed only occasionally and noncommercially, and
# only if you received the object code with such an offer, in accord
# with subsection 6b.
#
# d) Convey the object code by offering access from a designated
# place (gratis or for a charge), and offer equivalent access to the
# Corresponding Source in the same way through the same place at no
# further charge. You need not require recipients to copy the
# Corresponding Source along with the object code. If the place to
# copy the object code is a network server, the Corresponding Source
# may be on a different server (operated by you or a third party)
# that supports equivalent copying facilities, provided you maintain
# clear directions next to the object code saying where to find the
# Corresponding Source. Regardless of what server hosts the
# Corresponding Source, you remain obligated to ensure that it is
# available for as long as needed to satisfy these requirements.
#
# e) Convey the object code using peer-to-peer transmission, provided
# you inform other peers where the object code and Corresponding
# Source of the work are being offered to the general public at no
# charge under subsection 6d.
#
# A separable portion of the object code, whose source code is excluded
# from the Corresponding Source as a System Library, need not be
# included in conveying the object code work.
#
# A "User Product" is either (1) a "consumer product", which means any
# tangible personal property which is normally used for personal, family,
# or household purposes, or (2) anything designed or sold for incorporation
# into a dwelling. In determining whether a product is a consumer product,
# doubtful cases shall be resolved in favor of coverage. For a particular
# product received by a particular user, "normally used" refers to a
# typical or common use of that class of product, regardless of the status
# of the particular user or of the way in which the particular user
# actually uses, or expects or is expected to use, the product. A product
# is a consumer product regardless of whether the product has substantial
# commercial, industrial or non-consumer uses, unless such uses represent
# the only significant mode of use of the product.
#
# "Installation Information" for a User Product means any methods,
# procedures, authorization keys, or other information required to install
# and execute modified versions of a covered work in that User Product from
# a modified version of its Corresponding Source. The information must
# suffice to ensure that the continued functioning of the modified object
# code is in no case prevented or interfered with solely because
# modification has been made.
#
# If you convey an object code work under this section in, or with, or
# specifically for use in, a User Product, and the conveying occurs as
# part of a transaction in which the right of possession and use of the
# User Product is transferred to the recipient in perpetuity or for a
# fixed term (regardless of how the transaction is characterized), the
# Corresponding Source conveyed under this section must be accompanied
# by the Installation Information. But this requirement does not apply
# if neither you nor any third party retains the ability to install
# modified object code on the User Product (for example, the work has
# been installed in ROM).
#
# The requirement to provide Installation Information does not include a
# requirement to continue to provide support service, warranty, or updates
# for a work that has been modified or installed by the recipient, or for
# the User Product in which it has been modified or installed. Access to a
# network may be denied when the modification itself materially and
# adversely affects the operation of the network or violates the rules and
# protocols for communication across the network.
#
# Corresponding Source conveyed, and Installation Information provided,
# in accord with this section must be in a format that is publicly
# documented (and with an implementation available to the public in
# source code form), and must require no special password or key for
# unpacking, reading or copying.
#
# 7. Additional Terms.
#
# "Additional permissions" are terms that supplement the terms of this
# License by making exceptions from one or more of its conditions.
# Additional permissions that are applicable to the entire Program shall
# be treated as though they were included in this License, to the extent
# that they are valid under applicable law. If additional permissions
# apply only to part of the Program, that part may be used separately
# under those permissions, but the entire Program remains governed by
# this License without regard to the additional permissions.
#
# When you convey a copy of a covered work, you may at your option
# remove any additional permissions from that copy, or from any part of
# it. (Additional permissions may be written to require their own
# removal in certain cases when you modify the work.) You may place
# additional permissions on material, added by you to a covered work,
# for which you have or can give appropriate copyright permission.
#
# Notwithstanding any other provision of this License, for material you
# add to a covered work, you may (if authorized by the copyright holders of
# that material) supplement the terms of this License with terms:
#
# a) Disclaiming warranty or limiting liability differently from the
# terms of sections 15 and 16 of this License; or
#
# b) Requiring preservation of specified reasonable legal notices or
# author attributions in that material or in the Appropriate Legal
# Notices displayed by works containing it; or
#
# c) Prohibiting misrepresentation of the origin of that material, or
# requiring that modified versions of such material be marked in
# reasonable ways as different from the original version; or
#
# d) Limiting the use for publicity purposes of names of licensors or
# authors of the material; or
#
# e) Declining to grant rights under trademark law for use of some
# trade names, trademarks, or service marks; or
#
# f) Requiring indemnification of licensors and authors of that
# material by anyone who conveys the material (or modified versions of
# it) with contractual assumptions of liability to the recipient, for
# any liability that these contractual assumptions directly impose on
# those licensors and authors.
#
# All other non-permissive additional terms are considered "further
# restrictions" within the meaning of section 10. If the Program as you
# received it, or any part of it, contains a notice stating that it is
# governed by this License along with a term that is a further
# restriction, you may remove that term. If a license document contains
# a further restriction but permits relicensing or conveying under this
# License, you may add to a covered work material governed by the terms
# of that license document, provided that the further restriction does
# not survive such relicensing or conveying.
#
# If you add terms to a covered work in accord with this section, you
# must place, in the relevant source files, a statement of the
# additional terms that apply to those files, or a notice indicating
# where to find the applicable terms.
#
# Additional terms, permissive or non-permissive, may be stated in the
# form of a separately written license, or stated as exceptions;
# the above requirements apply either way.
#
# 8. Termination.
#
# You may not propagate or modify a covered work except as expressly
# provided under this License. Any attempt otherwise to propagate or
# modify it is void, and will automatically terminate your rights under
# this License (including any patent licenses granted under the third
# paragraph of section 11).
#
# However, if you cease all violation of this License, then your
# license from a particular copyright holder is reinstated (a)
# provisionally, unless and until the copyright holder explicitly and
# finally terminates your license, and (b) permanently, if the copyright
# holder fails to notify you of the violation by some reasonable means
# prior to 60 days after the cessation.
#
# Moreover, your license from a particular copyright holder is
# reinstated permanently if the copyright holder notifies you of the
# violation by some reasonable means, this is the first time you have
# received notice of violation of this License (for any work) from that
# copyright holder, and you cure the violation prior to 30 days after
# your receipt of the notice.
#
# Termination of your rights under this section does not terminate the
# licenses of parties who have received copies or rights from you under
# this License. If your rights have been terminated and not permanently
# reinstated, you do not qualify to receive new licenses for the same
# material under section 10.
#
# 9. Acceptance Not Required for Having Copies.
#
# You are not required to accept this License in order to receive or
# run a copy of the Program. Ancillary propagation of a covered work
# occurring solely as a consequence of using peer-to-peer transmission
# to receive a copy likewise does not require acceptance. However,
# nothing other than this License grants you permission to propagate or
# modify any covered work. These actions infringe copyright if you do
# not accept this License. Therefore, by modifying or propagating a
# covered work, you indicate your acceptance of this License to do so.
#
# 10. Automatic Licensing of Downstream Recipients.
#
# Each time you convey a covered work, the recipient automatically
# receives a license from the original licensors, to run, modify and
# propagate that work, subject to this License. You are not responsible
# for enforcing compliance by third parties with this License.
#
# An "entity transaction" is a transaction transferring control of an
# organization, or substantially all assets of one, or subdividing an
# organization, or merging organizations. If propagation of a covered
# work results from an entity transaction, each party to that
# transaction who receives a copy of the work also receives whatever
# licenses to the work the party's predecessor in interest had or could
# give under the previous paragraph, plus a right to possession of the
# Corresponding Source of the work from the predecessor in interest, if
# the predecessor has it or can get it with reasonable efforts.
#
# You may not impose any further restrictions on the exercise of the
# rights granted or affirmed under this License. For example, you may
# not impose a license fee, royalty, or other charge for exercise of
# rights granted under this License, and you may not initiate litigation
# (including a cross-claim or counterclaim in a lawsuit) alleging that
# any patent claim is infringed by making, using, selling, offering for
# sale, or importing the Program or any portion of it.
#
# 11. Patents.
#
# A "contributor" is a copyright holder who authorizes use under this
# License of the Program or a work on which the Program is based. The
# work thus licensed is called the contributor's "contributor version".
#
# A contributor's "essential patent claims" are all patent claims
# owned or controlled by the contributor, whether already acquired or
# hereafter acquired, that would be infringed by some manner, permitted
# by this License, of making, using, or selling its contributor version,
# but do not include claims that would be infringed only as a
# consequence of further modification of the contributor version. For
# purposes of this definition, "control" includes the right to grant
# patent sublicenses in a manner consistent with the requirements of
# this License.
#
# Each contributor grants you a non-exclusive, worldwide, royalty-free
# patent license under the contributor's essential patent claims, to
# make, use, sell, offer for sale, import and otherwise run, modify and
# propagate the contents of its contributor version.
#
# In the following three paragraphs, a "patent license" is any express
# agreement or commitment, however denominated, not to enforce a patent
# (such as an express permission to practice a patent or covenant not to
# sue for patent infringement). To "grant" such a patent license to a
# party means to make such an agreement or commitment not to enforce a
# patent against the party.
#
# If you convey a covered work, knowingly relying on a patent license,
# and the Corresponding Source of the work is not available for anyone
# to copy, free of charge and under the terms of this License, through a
# publicly available network server or other readily accessible means,
# then you must either (1) cause the Corresponding Source to be so
# available, or (2) arrange to deprive yourself of the benefit of the
# patent license for this particular work, or (3) arrange, in a manner
# consistent with the requirements of this License, to extend the patent
# license to downstream recipients. "Knowingly relying" means you have
# actual knowledge that, but for the patent license, your conveying the
# covered work in a country, or your recipient's use of the covered work
# in a country, would infringe one or more identifiable patents in that
# country that you have reason to believe are valid.
#
# If, pursuant to or in connection with a single transaction or
# arrangement, you convey, or propagate by procuring conveyance of, a
# covered work, and grant a patent license to some of the parties
# receiving the covered work authorizing them to use, propagate, modify
# or convey a specific copy of the covered work, then the patent license
# you grant is automatically extended to all recipients of the covered
# work and works based on it.
#
# A patent license is "discriminatory" if it does not include within
# the scope of its coverage, prohibits the exercise of, or is
# conditioned on the non-exercise of one or more of the rights that are
# specifically granted under this License. You may not convey a covered
# work if you are a party to an arrangement with a third party that is
# in the business of distributing software, under which you make payment
# to the third party based on the extent of your activity of conveying
# the work, and under which the third party grants, to any of the
# parties who would receive the covered work from you, a discriminatory
# patent license (a) in connection with copies of the covered work
# conveyed by you (or copies made from those copies), or (b) primarily
# for and in connection with specific products or compilations that
# contain the covered work, unless you entered into that arrangement,
# or that patent license was granted, prior to 28 March 2007.
#
# Nothing in this License shall be construed as excluding or limiting
# any implied license or other defenses to infringement that may
# otherwise be available to you under applicable patent law.
#
# 12. No Surrender of Others' Freedom.
#
# If conditions are imposed on you (whether by court order, agreement or
# otherwise) that contradict the conditions of this License, they do not
# excuse you from the conditions of this License. If you cannot convey a
# covered work so as to satisfy simultaneously your obligations under this
# License and any other pertinent obligations, then as a consequence you may
# not convey it at all. For example, if you agree to terms that obligate you
# to collect a royalty for further conveying from those to whom you convey
# the Program, the only way you could satisfy both those terms and this
# License would be to refrain entirely from conveying the Program.
#
# 13. Use with the GNU Affero General Public License.
#
# Notwithstanding any other provision of this License, you have
# permission to link or combine any covered work with a work licensed
# under version 3 of the GNU Affero General Public License into a single
# combined work, and to convey the resulting work. The terms of this
# License will continue to apply to the part which is the covered work,
# but the special requirements of the GNU Affero General Public License,
# section 13, concerning interaction through a network will apply to the
# combination as such.
#
# 14. Revised Versions of this License.
#
# The Free Software Foundation may publish revised and/or new versions of
# the GNU General Public License from time to time. Such new versions will
# be similar in spirit to the present version, but may differ in detail to
# address new problems or concerns.
#
# Each version is given a distinguishing version number. If the
# Program specifies that a certain numbered version of the GNU General
# Public License "or any later version" applies to it, you have the
# option of following the terms and conditions either of that numbered
# version or of any later version published by the Free Software
# Foundation. If the Program does not specify a version number of the
# GNU General Public License, you may choose any version ever published
# by the Free Software Foundation.
#
# If the Program specifies that a proxy can decide which future
# versions of the GNU General Public License can be used, that proxy's
# public statement of acceptance of a version permanently authorizes you
# to choose that version for the Program.
#
# Later license versions may give you additional or different
# permissions. However, no additional obligations are imposed on any
# author or copyright holder as a result of your choosing to follow a
# later version.
#
# 15. Disclaimer of Warranty.
#
# THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
# APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
# HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
# OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
# IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
# ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
#
# 16. Limitation of Liability.
#
# IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
# WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
# THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
# GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
# USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
# DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
# PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
# EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 17. Interpretation of Sections 15 and 16.
#
# If the disclaimer of warranty and limitation of liability provided
# above cannot be given local legal effect according to their terms,
# reviewing courts shall apply local law that most closely approximates
# an absolute waiver of all civil liability in connection with the
# Program, unless a warranty or assumption of liability accompanies a
# copy of the Program in return for a fee.
#
# END OF TERMS AND CONDITIONS
#
# How to Apply These Terms to Your New Programs
#
# If you develop a new program, and you want it to be of the greatest
# possible use to the public, the best way to achieve this is to make it
# free software which everyone can redistribute and change under these terms.
#
# To do so, attach the following notices to the program. It is safest
# to attach them to the start of each source file to most effectively
# state the exclusion of warranty; and each file should have at least
# the "copyright" line and a pointer to where the full notice is found.
#
# {one line to give the program's name and a brief idea of what it does.}
# Copyright (C) {year} {name of author}
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Also add information on how to contact you by electronic and paper mail.
#
# If the program does terminal interaction, make it output a short
# notice like this when it starts in an interactive mode:
#
# {project} Copyright (C) {year} {fullname}
# This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
# This is free software, and you are welcome to redistribute it
# under certain conditions; type `show c' for details.
#
# The hypothetical commands `show w' and `show c' should show the appropriate
# parts of the General Public License. Of course, your program's commands
# might be different; for a GUI interface, you would use an "about box".
#
# You should also get your employer (if you work as a programmer) or school,
# if any, to sign a "copyright disclaimer" for the program, if necessary.
# For more information on this, and how to apply and follow the GNU GPL, see
# <http://www.gnu.org/licenses/>.
#
# The GNU General Public License does not permit incorporating your program
# into proprietary programs. If your program is a subroutine library, you
# may consider it more useful to permit linking proprietary applications with
# the library. If this is what you want to do, use the GNU Lesser General
# Public License instead of this License. But first, please read
# <http://www.gnu.org/philosophy/why-not-lgpl.html>.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import lora_swig as lora
class qa_receiver (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
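    # Illustrative sketch, not part of the original gr-lora test: a fleshed-out
    # QA test usually wires a vector source through the block under test into a
    # vector sink, runs the flowgraph, then compares captured and expected data.
    # blocks.copy stands in here for the real block under test.
    def test_002_passthrough_sketch (self):
        src_data = (1.0, 2.0, 3.0, 4.0)
        src = blocks.vector_source_f(src_data)
        op = blocks.copy(gr.sizeof_float)
        snk = blocks.vector_sink_f()
        self.tb.connect(src, op, snk)
        self.tb.run()
        self.assertFloatTuplesAlmostEqual(src_data, snk.data())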
if __name__ == '__main__':
gr_unittest.run(qa_receiver, "qa_receiver.xml")
|
gpl-3.0
|
jiguanglizipao/ucore_os_lab
|
related_info/ostep/ostep1-relocation.py
|
54
|
3914
|
#! /usr/bin/env python
import sys
from optparse import OptionParser
import random
import math
def convert(size):
length = len(size)
lastchar = size[length-1]
if (lastchar == 'k') or (lastchar == 'K'):
m = 1024
nsize = int(size[0:length-1]) * m
elif (lastchar == 'm') or (lastchar == 'M'):
m = 1024*1024
nsize = int(size[0:length-1]) * m
elif (lastchar == 'g') or (lastchar == 'G'):
m = 1024*1024*1024
nsize = int(size[0:length-1]) * m
else:
nsize = int(size)
return nsize
#
# main program
#
parser = OptionParser()
parser.add_option('-s', '--seed', default=0, help='the random seed', action='store', type='int', dest='seed')
parser.add_option('-a', '--asize', default='1k', help='address space size (e.g., 16, 64k, 32m, 1g)', action='store', type='string', dest='asize')
parser.add_option('-p', '--physmem', default='16k', help='physical memory size (e.g., 16, 64k, 32m, 1g)', action='store', type='string', dest='psize')
parser.add_option('-n', '--addresses', default=5, help='number of virtual addresses to generate', action='store', type='int', dest='num')
parser.add_option('-b', '--b', default='-1', help='value of base register', action='store', type='string', dest='base')
parser.add_option('-l', '--l', default='-1', help='value of limit register', action='store', type='string', dest='limit')
parser.add_option('-c', '--compute', default=False, help='compute answers for me', action='store_true', dest='solve')
(options, args) = parser.parse_args()
print ''
print 'ARG seed', options.seed
print 'ARG address space size', options.asize
print 'ARG phys mem size', options.psize
print ''
random.seed(options.seed)
asize = convert(options.asize)
psize = convert(options.psize)
if psize <= 1:
print 'Error: must specify a non-zero physical memory size.'
exit(1)
if asize == 0:
print 'Error: must specify a non-zero address-space size.'
exit(1)
if psize <= asize:
print 'Error: physical memory size must be GREATER than address space size (for this simulation)'
exit(1)
#
# need to generate base, bounds for segment registers
#
limit = convert(options.limit)
base = convert(options.base)
if limit == -1:
limit = int(asize/4.0 + (asize/4.0 * random.random()))
# now have to find room for them
if base == -1:
done = 0
while done == 0:
base = int(psize * random.random())
if (base + limit) < psize:
done = 1
print 'Base-and-Bounds register information:'
print ''
print ' Base : 0x%08x (decimal %d)' % (base, base)
print ' Limit : %d' % (limit)
print ''
if base + limit > psize:
print 'Error: address space does not fit into physical memory with those base/bounds values.'
print 'Base + Limit:', base + limit, ' Psize:', psize
exit(1)
#
# now, need to generate virtual address trace
#
print 'Virtual Address Trace'
for i in range(0,options.num):
vaddr = int(asize * random.random())
if options.solve == False:
print ' VA %2d: 0x%08x (decimal: %4d) --> PA or segmentation violation?' % (i, vaddr, vaddr)
else:
paddr = 0
if (vaddr >= limit):
print ' VA %2d: 0x%08x (decimal: %4d) --> SEGMENTATION VIOLATION' % (i, vaddr, vaddr)
else:
paddr = vaddr + base
print ' VA %2d: 0x%08x (decimal: %4d) --> VALID: 0x%08x (decimal: %4d)' % (i, vaddr, vaddr, paddr, paddr)
print ''
if options.solve == False:
print 'For each virtual address, either write down the physical address it translates to'
print 'OR write down that it is an out-of-bounds address (a segmentation violation). For'
print 'this problem, you should assume a simple virtual address space of a given size.'
print ''
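#
# worked example (illustrative addition, not part of the original script):
# with base-and-bounds relocation the rule is simply
#   physical = base + virtual    if virtual < limit, otherwise a segmentation violation
# e.g. base 0x3000, limit 0x400: VA 0x200 -> PA 0x3200, VA 0x450 -> violation
#
def translate_example(vaddr, base_reg, limit_reg):
    if vaddr >= limit_reg:
        return None              # out of bounds: segmentation violation
    return base_reg + vaddr

assert translate_example(0x200, 0x3000, 0x400) == 0x3200
assert translate_example(0x450, 0x3000, 0x400) is None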
|
gpl-2.0
|
burkesquires/pyeq2
|
Examples/Web/FlaskFit.py
|
1
|
15008
|
import os, sys, inspect
# ensure pyeq2 can be imported
if -1 != sys.path[0].find('pyeq2-master'):raise Exception('Please rename git checkout directory from "pyeq2-master" to "pyeq2"')
exampleFileDirectory = sys.path[0][:sys.path[0].rfind(os.sep)]
pyeq2ImportDirectory = os.path.join(os.path.join(exampleFileDirectory, '..'), '..')
if pyeq2ImportDirectory not in sys.path:
sys.path.append(pyeq2ImportDirectory)
import pyeq2, GraphUtils, TextUtils
from flask import Flask
from flask import request
# override Flask's default file cache for the files we generate
class MyFlask(Flask):
def get_send_file_max_age(self, name):
if name.lower().endswith('.png'):
return 0.000001
if name.lower().endswith('.txt'):
return 0.000001
if name.lower().endswith('.html'):
return 0.000001
return Flask.get_send_file_max_age(self, name)
app = MyFlask(__name__)
app.debug = True # only for development, never for production
@app.route('/')
def test_curve_fiting_and_plotting():
# HTML for 2D fitter form
htmlToReturn_2Dform = '''
<table border=1 cellpadding=20>
<tr><td><b>Example 2D f(x) Web Fitter</b></td></tr>
<tr><td>
<form action="/simplefitter_2D" method="post" target=_blank>
--- 2D Text Data ---<br>
<textarea rows="10" cols="45" name="textdata" wrap=off>
Example 2D data for testing
X Y
5.357 10.376
5.457 10.489
5.936 11.049
6.161 11.327 ending text is ignored
6.697 12.054
8.442 14.744
9.769 17.068
9.861 17.104
</textarea>
<br><br>
--- Example 2D Equations ---<br>
<input type="radio" name="equation" value="Linear" checked>Linear Polynomial<br>
<input type="radio" name="equation" value="Quadratic">Quadratic Polynomial<br>
<input type="radio" name="equation" value="Cubic">Cubic Polynomial<br>
<input type="radio" name="equation" value="WitchA">Witch Of Maria Agnesi A<br>
<input type="radio" name="equation" value="VanDeemter">VanDeemter Chromatography<br>
<input type="radio" name="equation" value="GammaRayDegreesB">Gamma Ray Angular Distribution (degrees) B<br>
<input type="radio" name="equation" value="ExponentialWithOffset">Exponential With Offset<br>
<br>
<table><tr>
<td>
<input type="submit" value="Submit">
</td>
<td align="left">
<input type="radio" name="target" value="SSQABS" checked>Lowest Sum Of Squared Absolute Error<br>
<input type="radio" name="target" value="SSQREL">Lowest Sum Of Squared Relative Error<br>
<input type="radio" name="target" value="ODR">Lowest Sum Of Squared Orthogonal Distance<br>
</td>
</tr></table>
</form>
<br><br>
<a href="/equationlist_2D">Link to all standard 2D equations</a>
</td></tr></table>
'''
# HTML for 3D fitter form
htmlToReturn_3Dform = '''
<table border=1 cellpadding=20>
<tr><td><b>Example 3D f(x,y) Web Fitter</b></td></tr>
<tr><td>
<form action="/simplefitter_3D" method="post" target=_blank>
--- 3D Text Data ---<br>
<textarea rows="10" cols="45" name="textdata" wrap=off>
Example 3D data for testing
X Y Z
3.017 2.175 0.0320
2.822 2.624 0.0629
1.784 3.144 6.570
1.712 3.153 6.721
2.972 2.106 0.0313
2.719 2.542 0.0643
2.0 2.6 4.0 ending text is ignored
1.479 2.957 6.583
1.387 2.963 6.744
2.843 1.984 0.0315
2.485 2.320 0.0639
0.742 2.568 6.581
0.607 2.571 6.753
</textarea>
<br><br>
--- Example 3D Equations ---<br>
<input type="radio" name="equation" value="Linear" checked>Linear Polynomial<br>
<input type="radio" name="equation" value="FullQuadratic">Full Quadratic Polynomial<br>
<input type="radio" name="equation" value="FullCubic">Full Cubic Polynomial<br>
<input type="radio" name="equation" value="MonkeySaddleA">Monkey Saddle A<br>
<input type="radio" name="equation" value="GaussianCurvatureOfWhitneysUmbrellaA">Gaussian Curvature Of Whitneys Umbrella A<br>
<input type="radio" name="equation" value="NIST_NelsonAutolog">NIST Nelson Autolog<br>
<input type="radio" name="equation" value="CustomPolynomialOne">Custom Polynomial One<br>
<br>
<table><tr>
<td>
<input type="submit" value="Submit">
</td>
<td align="left">
<input type="radio" name="target" value="SSQABS" checked>Lowest Sum Of Squared Absolute Error<br>
<input type="radio" name="target" value="SSQREL">Lowest Sum Of Squared Relative Error<br>
<input type="radio" name="target" value="ODR">Lowest Sum Of Squared Orthogonal Distance<br>
</td>
</tr></table>
</form>
<br><br>
<a href="/equationlist_3D">Link to all standard 3D equations</a>
</td></tr></table>
'''
# return HTML to Flask as a web page
s = '<html><body>'
s += '<table><tr>'
s += '<td>' + htmlToReturn_2Dform + '</td>'
s += '<td> </td>'
s += '<td>' + htmlToReturn_3Dform + '</td>'
s += '</tr></table>'
s +='</body></html>'
return s
@app.route('/simplefitter_2D', methods=['POST'])
def simplefitter_2D_NoFormDataValidation():
formTextData = request.form['textdata']
formEquation = request.form['equation']
formFittingTarget = request.form['target']
if formEquation == 'Linear':
equation = pyeq2.Models_2D.Polynomial.Linear(formFittingTarget)
elif formEquation == 'Quadratic':
equation = pyeq2.Models_2D.Polynomial.Quadratic(formFittingTarget)
elif formEquation == 'Cubic':
equation = pyeq2.Models_2D.Polynomial.Cubic(formFittingTarget)
elif formEquation == 'WitchA':
equation = pyeq2.Models_2D.Miscellaneous.WitchOfAgnesiA(formFittingTarget)
elif formEquation == 'VanDeemter':
equation = pyeq2.Models_2D.Engineering.VanDeemterChromatography(formFittingTarget)
elif formEquation == 'GammaRayDegreesB':
equation = pyeq2.Models_2D.LegendrePolynomial.GammaRayAngularDistributionDegreesB(formFittingTarget)
elif formEquation == 'ExponentialWithOffset':
equation = pyeq2.Models_2D.Exponential.Exponential(formFittingTarget, 'Offset')
# the name of the data here is from the form
    # check for functions requiring non-zero or non-negative data, such as 1/x, log(x), etc.
try:
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(formTextData, equation, False)
except:
return equation.reasonWhyDataRejected
# check for number of coefficients > number of data points to be fitted
coeffCount = len(equation.GetCoefficientDesignators())
dataCount = len(equation.dataCache.allDataCacheDictionary['DependentData'])
if coeffCount > dataCount:
return "This equation requires a minimum of " + repr(coeffCount) + " data points, you supplied " + repr(dataCount) + "."
equation.Solve()
equation.CalculateModelErrors(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
equation.CalculateCoefficientAndFitStatistics()
# save fit statistics to a text file
fitStatisticsFilePath = "static/fitstatistics_simplefitter_2D.txt" # simplefitter_2D
TextUtils.SaveCoefficientAndFitStatistics(fitStatisticsFilePath, equation)
# save source code to a single text file, all available languages
sourceCodeFilePath = "static/sourcecode_simplefitter_2D.html" # simplefitter_2D
TextUtils.SaveSourceCode(sourceCodeFilePath, equation)
# create graph
graphFilePath = "static/model_and_scatterplot_simplefitter_2D.png" # simplefitter_2D
title = "Example Of An HTML FORM Model"
xAxisLabel = "X data"
yAxisLabel = "Y data"
GraphUtils.SaveModelScatterConfidence(graphFilePath,
equation, title, xAxisLabel, yAxisLabel)
absErrorPlotFilePath = "static/abs_error_simplefitter_2D.png" # simplefitter_2D
title = "Absolute Error For An HTML FORM Model"
GraphUtils.SaveAbsErrorScatterPlot(absErrorPlotFilePath, equation, title, yAxisLabel)
if equation.dataCache.DependentDataContainsZeroFlag != 1:
percentErrorPlotFilePath = "static/percent_error_simplefitter_2D.png" # simplefitter_2D
title = "Percent Error For An HTML FORM Model"
GraphUtils.SavePercentErrorScatterPlot(percentErrorPlotFilePath, equation, title, yAxisLabel)
# generate HTML
htmlToReturn = ''
htmlToReturn += equation.GetDisplayName() + '<br><br>\n'
htmlToReturn += equation.GetDisplayHTML() + '<br><br>\n'
htmlToReturn += '<a href="' + fitStatisticsFilePath + '">Link to parameter and fit statistics</a><br><br>\n'
htmlToReturn += '<a href="' + sourceCodeFilePath + '">Link to source code, all available languages</a><br><br>\n'
htmlToReturn += '<img src="' + graphFilePath + '"> <br>\n'
htmlToReturn += '<img src="' + absErrorPlotFilePath + '"><br>\n'
if equation.dataCache.DependentDataContainsZeroFlag != 1:
htmlToReturn += '<img src="' + percentErrorPlotFilePath + '"><br><br>\n'
return '<html><body>' + htmlToReturn + '</body></html>'
@app.route('/simplefitter_3D', methods=['POST'])
def simplefitter_3D_NoFormDataValidation():
formTextData = request.form['textdata']
formEquation = request.form['equation']
formFittingTarget = request.form['target']
if formEquation == 'Linear':
equation = pyeq2.Models_3D.Polynomial.Linear(formFittingTarget)
elif formEquation == 'FullQuadratic':
equation = pyeq2.Models_3D.Polynomial.FullQuadratic(formFittingTarget)
elif formEquation == 'FullCubic':
equation = pyeq2.Models_3D.Polynomial.FullCubic(formFittingTarget)
elif formEquation == 'MonkeySaddleA':
equation = pyeq2.Models_3D.Miscellaneous.MonkeySaddleA(formFittingTarget)
elif formEquation == 'GaussianCurvatureOfWhitneysUmbrellaA':
equation = pyeq2.Models_3D.Miscellaneous.GaussianCurvatureOfWhitneysUmbrellaA(formFittingTarget)
elif formEquation == 'NIST_NelsonAutolog':
equation = pyeq2.Models_3D.NIST.NIST_NelsonAutolog(formFittingTarget)
elif formEquation == 'CustomPolynomialOne': # X order 3, Y order 1 in this example - passed as integers
equation = pyeq2.Models_3D.Polynomial.UserSelectablePolynomial(formFittingTarget, "Default", 3, 1)
# the name of the data here is from the form
    # check for functions requiring non-zero or non-negative data, such as 1/x, log(x), etc.
try:
pyeq2.dataConvertorService().ConvertAndSortColumnarASCII(formTextData, equation, False)
except:
return equation.reasonWhyDataRejected
# check for number of coefficients > number of data points to be fitted
coeffCount = len(equation.GetCoefficientDesignators())
dataCount = len(equation.dataCache.allDataCacheDictionary['DependentData'])
if coeffCount > dataCount:
return "This equation requires a minimum of " + repr(coeffCount) + " data points, you supplied " + repr(dataCount) + "."
equation.Solve()
equation.CalculateModelErrors(equation.solvedCoefficients, equation.dataCache.allDataCacheDictionary)
equation.CalculateCoefficientAndFitStatistics()
# save fit statistics to a text file
fitStatisticsFilePath = "static/fitstatistics_simplefitter_3D.txt" # simplefitter_3D
TextUtils.SaveCoefficientAndFitStatistics(fitStatisticsFilePath, equation)
# save source code to a single text file, all available languages
sourceCodeFilePath = "static/sourcecode_simplefitter_3D.html" # simplefitter_3D
TextUtils.SaveSourceCode(sourceCodeFilePath, equation)
# create graphs
graphFilePath_Surface = "static/surface.png" # surface plot
graphFilePath_Contour = "static/contour.png" # contour plot
surfaceTitle = "Example Surface Plot"
contourTitle = "Example Contour Plot"
xAxisLabel = "X data"
yAxisLabel = "Y data"
zAxisLabel = "Z data"
GraphUtils.SurfaceAndContourPlots(graphFilePath_Surface,
graphFilePath_Contour,
equation, surfaceTitle, contourTitle,
xAxisLabel, yAxisLabel, zAxisLabel)
absErrorPlotFilePath = "static/abs_error_simplefitter_3D.png" # simplefitter_3D
title = "Absolute Error For An HTML FORM Model"
GraphUtils.SaveAbsErrorScatterPlot(absErrorPlotFilePath, equation, title, zAxisLabel)
if equation.dataCache.DependentDataContainsZeroFlag != 1:
percentErrorPlotFilePath = "static/percent_error_simplefitter_3D.png" # simplefitter_3D
title = "Percent Error For An HTML FORM Model"
GraphUtils.SavePercentErrorScatterPlot(percentErrorPlotFilePath, equation, title, zAxisLabel)
# generate HTML
htmlToReturn = ''
htmlToReturn += equation.GetDisplayName() + '<br><br>\n'
htmlToReturn += equation.GetDisplayHTML() + '<br><br>\n'
htmlToReturn += '<a href="' + fitStatisticsFilePath + '">Link to parameter and fit statistics</a><br><br>\n'
htmlToReturn += '<a href="' + sourceCodeFilePath + '">Link to source code, all available languages</a><br><br>\n'
htmlToReturn += '<img src="' + graphFilePath_Surface + '"><br><br>\n'
htmlToReturn += '<img src="' + graphFilePath_Contour + '"><br><br>\n'
htmlToReturn += '<img src="' + absErrorPlotFilePath + '"><br><br>\n'
if equation.dataCache.DependentDataContainsZeroFlag != 1:
htmlToReturn += '<img src="' + percentErrorPlotFilePath + '"><br><br>\n'
return '<html><body>' + htmlToReturn + '</body></html>'
@app.route('/equationlist_2D', methods=['GET'])
def equationlist_2D():
htmlToReturn = '' # build this as we progress
for submodule in inspect.getmembers(pyeq2.Models_2D):
if inspect.ismodule(submodule[1]):
for equationClass in inspect.getmembers(submodule[1]):
if inspect.isclass(equationClass[1]):
for extendedVersionName in ['Default', 'Offset']:
if (-1 != extendedVersionName.find('Offset')) and (equationClass[1].autoGenerateOffsetForm == False):
continue
equation = equationClass[1]('SSQABS', extendedVersionName)
htmlToReturn += '2D ' + submodule[0] + ' --- ' + equation.GetDisplayName() + '<br>\n'
return '<html><body>' + htmlToReturn + '</body></html>'
@app.route('/equationlist_3D', methods=['GET'])
def equationlist_3D():
htmlToReturn = '' # build this as we progress
for submodule in inspect.getmembers(pyeq2.Models_3D):
if inspect.ismodule(submodule[1]):
for equationClass in inspect.getmembers(submodule[1]):
if inspect.isclass(equationClass[1]):
for extendedVersionName in ['Default', 'Offset']:
if (-1 != extendedVersionName.find('Offset')) and (equationClass[1].autoGenerateOffsetForm == False):
continue
equation = equationClass[1]('SSQABS', extendedVersionName)
htmlToReturn += '3D ' + submodule[0] + ' --- ' + equation.GetDisplayName() + '<br>\n'
return '<html><body>' + htmlToReturn + '</body></html>'
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
bsd-2-clause
|
jainayush975/zulip
|
zerver/views/registration.py
|
4
|
19018
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from typing import Any, List, Dict, Optional, Text
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse, HttpRequest
from django.shortcuts import redirect, render
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.core.exceptions import ValidationError
from django.core import validators
from django.core.mail import send_mail
from zerver.models import UserProfile, Realm, PreregistrationUser, \
name_changes_disabled, email_to_username, \
completely_open, get_unique_open_realm, email_allowed_for_realm, \
get_realm, get_realm_by_email_domain
from zerver.lib.events import do_events_register
from zerver.lib.actions import do_change_password, do_change_full_name, do_change_is_admin, \
do_activate_user, do_create_user, do_create_realm, set_default_streams, \
user_email_is_unique, \
compute_mit_user_fullname
from zerver.forms import RegistrationForm, HomepageForm, RealmCreationForm, \
CreateUserForm, FindMyTeamForm
from zerver.lib.actions import is_inactive
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.decorator import require_post, has_request_variables, \
JsonableError, get_user_profile_by_email, REQ
from zerver.lib.response import json_success
from zerver.lib.utils import get_subdomain
from zproject.backends import password_auth_enabled
from confirmation.models import Confirmation, RealmCreationKey, check_key_is_valid
import logging
import requests
import ujson
from six.moves import urllib
def redirect_and_log_into_subdomain(realm, full_name, email_address):
# type: (Realm, Text, Text) -> HttpResponse
subdomain_login_uri = ''.join([
realm.uri,
reverse('zerver.views.auth.log_into_subdomain')
])
domain = '.' + settings.EXTERNAL_HOST.split(':')[0]
response = redirect(subdomain_login_uri)
data = {'name': full_name, 'email': email_address, 'subdomain': realm.subdomain}
    # Creating a signed cookie so that it cannot be tampered with.
# Cookie and the signature expire in 15 seconds.
response.set_signed_cookie('subdomain.signature',
ujson.dumps(data),
expires=15,
domain=domain,
salt='zerver.views.auth')
return response
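# Illustrative sketch, not part of the original file: on the subdomain side,
# the receiving view would typically read the cookie back with the same salt
# and a matching max_age, letting Django verify the signature, e.g.
#     raw = request.get_signed_cookie('subdomain.signature',
#                                     salt='zerver.views.auth', max_age=15)
#     data = ujson.loads(raw)
# get_signed_cookie() raises django.core.signing.BadSignature if the value
# was tampered with, which is what makes this handoff safe.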
@require_post
def accounts_register(request):
# type: (HttpRequest) -> HttpResponse
key = request.POST['key']
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
email = prereg_user.email
realm_creation = prereg_user.realm_creation
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
validators.validate_email(email)
# If OPEN_REALM_CREATION is enabled all user sign ups should go through the
# special URL with domain name so that REALM can be identified if multiple realms exist
unique_open_realm = get_unique_open_realm()
if unique_open_realm is not None:
realm = unique_open_realm
elif prereg_user.referred_by:
# If someone invited you, you are joining their realm regardless
# of your e-mail address.
realm = prereg_user.referred_by.realm
elif prereg_user.realm:
# You have a realm set, even though nobody referred you. This
# happens if you sign up through a special URL for an open realm.
realm = prereg_user.realm
elif realm_creation:
# For creating a new realm, there is no existing realm or domain
realm = None
elif settings.REALMS_HAVE_SUBDOMAINS:
realm = get_realm(get_subdomain(request))
else:
realm = get_realm_by_email_domain(email)
if realm and not email_allowed_for_realm(email, realm):
return render(request, "zerver/closed_realm.html",
context={"closed_domain_name": realm.name})
if realm and realm.deactivated:
# The user is trying to register for a deactivated realm. Advise them to
# contact support.
return render(request, "zerver/deactivated.html",
context={"deactivated_domain_name": realm.name,
"zulip_administrator": settings.ZULIP_ADMINISTRATOR})
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' +
urllib.parse.quote_plus(email))
name_validated = False
full_name = None
if request.POST.get('from_confirmation'):
try:
del request.session['authenticated_full_name']
except KeyError:
pass
if realm is not None and realm.is_zephyr_mirror_realm:
# For MIT users, we can get an authoritative name from Hesiod.
# Technically we should check that this is actually an MIT
# realm, but we can cross that bridge if we ever get a non-MIT
# zephyr mirroring realm.
hesiod_name = compute_mit_user_fullname(email)
form = RegistrationForm(
initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
name_validated = True
elif settings.POPULATE_PROFILE_VIA_LDAP:
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
try:
ldap_full_name = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
request.session['authenticated_full_name'] = ldap_full_name
name_validated = True
# We don't use initial= here, because if the form is
# complete (that is, no additional fields need to be
# filled out by the user) we want the form to validate,
# so they can be directly registered without having to
# go through this interstitial.
form = RegistrationForm({'full_name': ldap_full_name})
# FIXME: This will result in the user getting
# validation errors if they have to enter a password.
# Not relevant for ONLY_SSO, though.
break
except TypeError:
# Let the user fill out a name and/or try another backend
form = RegistrationForm()
elif 'full_name' in request.POST:
form = RegistrationForm(
initial={'full_name': request.POST.get('full_name')}
)
else:
form = RegistrationForm()
else:
postdata = request.POST.copy()
if name_changes_disabled(realm):
# If we populate profile information via LDAP and we have a
# verified name from you on file, use that. Otherwise, fall
# back to the full name in the request.
try:
postdata.update({'full_name': request.session['authenticated_full_name']})
name_validated = True
except KeyError:
pass
form = RegistrationForm(postdata)
if not password_auth_enabled(realm):
form['password'].field.required = False
if form.is_valid():
if password_auth_enabled(realm):
password = form.cleaned_data['password']
else:
# SSO users don't need no passwords
password = None
if realm_creation:
string_id = form.cleaned_data['realm_subdomain']
realm_name = form.cleaned_data['realm_name']
org_type = int(form.cleaned_data['realm_org_type'])
realm = do_create_realm(string_id, realm_name, org_type=org_type)[0]
set_default_streams(realm, settings.DEFAULT_NEW_REALM_STREAMS)
full_name = form.cleaned_data['full_name']
short_name = email_to_username(email)
first_in_realm = len(UserProfile.objects.filter(realm=realm, is_bot=False)) == 0
# FIXME: sanitize email addresses and fullname
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
user_profile = existing_user_profile
do_activate_user(user_profile)
do_change_password(user_profile, password)
do_change_full_name(user_profile, full_name)
else:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
tos_version=settings.TOS_VERSION,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
if first_in_realm:
do_change_is_admin(user_profile, True)
if realm_creation and settings.REALMS_HAVE_SUBDOMAINS:
# Because for realm creation, registration happens on the
# root domain, we need to log them into the subdomain for
# their new realm.
return redirect_and_log_into_subdomain(realm, full_name, email)
# This dummy_backend check below confirms the user is
# authenticating to the correct subdomain.
return_data = {} # type: Dict[str, bool]
auth_result = authenticate(username=user_profile.email,
realm_subdomain=realm.subdomain,
return_data=return_data,
use_dummy_backend=True)
if return_data.get('invalid_subdomain'):
# By construction, this should never happen.
logging.error("Subdomain mismatch in registration %s: %s" % (
realm.subdomain, user_profile.email,))
return redirect('/')
login(request, auth_result)
return HttpResponseRedirect(realm.uri + reverse('zerver.views.home.home'))
return render(
request,
'zerver/register.html',
context={'form': form,
'email': email,
'key': key,
'full_name': request.session.get('authenticated_full_name', None),
'lock_name': name_validated and name_changes_disabled(realm),
# password_auth_enabled is normally set via our context processor,
# but for the registration form, there is no logged in user yet, so
# we have to set it here.
'creating_new_team': realm_creation,
'realms_have_subdomains': settings.REALMS_HAVE_SUBDOMAINS,
'password_auth_enabled': password_auth_enabled(realm),
}
)
def create_preregistration_user(email, request, realm_creation=False):
# type: (Text, HttpRequest, bool) -> HttpResponse
realm_str = request.session.pop('realm_str', None)
if realm_str is not None:
# realm_str was set in accounts_home_with_realm_str.
# The user is trying to sign up for a completely open realm,
# so create them a PreregistrationUser for that realm
return PreregistrationUser.objects.create(email=email,
realm=get_realm(realm_str),
realm_creation=realm_creation)
return PreregistrationUser.objects.create(email=email, realm_creation=realm_creation)
def accounts_home_with_realm_str(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
if not settings.REALMS_HAVE_SUBDOMAINS and completely_open(get_realm(realm_str)):
# You can sign up for a completely open realm through a
# special registration path that contains the domain in the
# URL. We store this information in the session rather than
# elsewhere because we don't have control over URL or form
# data for folks registering through OpenID.
request.session["realm_str"] = realm_str
return accounts_home(request)
else:
return HttpResponseRedirect(reverse('zerver.views.registration.accounts_home'))
def send_registration_completion_email(email, request, realm_creation=False):
# type: (str, HttpRequest, bool) -> Confirmation
"""
Send an email with a confirmation link to the provided e-mail so the user
can complete their registration.
"""
prereg_user = create_preregistration_user(email, request, realm_creation)
context = {'support_email': settings.ZULIP_ADMINISTRATOR,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS}
return Confirmation.objects.send_confirmation(prereg_user, email,
additional_context=context,
host=request.get_host())
def redirect_to_email_login_url(email):
# type: (str) -> HttpResponseRedirect
login_url = reverse('django.contrib.auth.views.login')
redirect_url = login_url + '?email=' + urllib.parse.quote_plus(email)
return HttpResponseRedirect(redirect_url)
def create_realm(request, creation_key=None):
# type: (HttpRequest, Optional[Text]) -> HttpResponse
if not settings.OPEN_REALM_CREATION:
if creation_key is None:
return render(request, "zerver/realm_creation_failed.html",
context={'message': _('New organization creation disabled.')})
elif not check_key_is_valid(creation_key):
return render(request, "zerver/realm_creation_failed.html",
context={'message': _('The organization creation link has expired'
' or is not valid.')})
# When settings.OPEN_REALM_CREATION is enabled, anyone can create a new realm,
# subject to a few restrictions on their email address.
if request.method == 'POST':
form = RealmCreationForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
confirmation_key = send_registration_completion_email(email, request, realm_creation=True).confirmation_key
if settings.DEVELOPMENT:
request.session['confirmation_key'] = {'confirmation_key': confirmation_key}
if (creation_key is not None and check_key_is_valid(creation_key)):
RealmCreationKey.objects.get(creation_key=creation_key).delete()
return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
try:
email = request.POST['email']
user_email_is_unique(email)
except ValidationError:
# Maybe the user is trying to log in
return redirect_to_email_login_url(email)
else:
form = RealmCreationForm()
return render(request,
'zerver/create_realm.html',
context={'form': form, 'current_url': request.get_full_path},
)
def confirmation_key(request):
# type: (HttpRequest) -> HttpResponse
return json_success(request.session.get('confirmation_key'))
def get_realm_from_request(request):
# type: (HttpRequest) -> Realm
if settings.REALMS_HAVE_SUBDOMAINS:
realm_str = get_subdomain(request)
else:
realm_str = request.session.get("realm_str")
return get_realm(realm_str)
def accounts_home(request):
# type: (HttpRequest) -> HttpResponse
realm = get_realm_from_request(request)
if request.method == 'POST':
form = HomepageForm(request.POST, realm=realm)
if form.is_valid():
email = form.cleaned_data['email']
send_registration_completion_email(email, request)
return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
try:
email = request.POST['email']
# Note: We don't check for uniqueness
is_inactive(email)
except ValidationError:
return redirect_to_email_login_url(email)
else:
form = HomepageForm(realm=realm)
return render(request,
'zerver/accounts_home.html',
context={'form': form, 'current_url': request.get_full_path},
)
def generate_204(request):
# type: (HttpRequest) -> HttpResponse
return HttpResponse(content=None, status=204)
def send_find_my_team_emails(user_profile):
# type: (UserProfile) -> None
text_template = 'zerver/emails/find_team/find_team_email.txt'
html_template = 'zerver/emails/find_team/find_team_email.html'
context = {'user_profile': user_profile}
text_content = loader.render_to_string(text_template, context)
html_content = loader.render_to_string(html_template, context)
sender = settings.NOREPLY_EMAIL_ADDRESS
recipients = [user_profile.email]
subject = loader.render_to_string('zerver/emails/find_team/find_team_email.subject').strip()
send_mail(subject, text_content, sender, recipients, html_message=html_content)
def find_my_team(request):
# type: (HttpRequest) -> HttpResponse
url = reverse('zerver.views.registration.find_my_team')
emails = [] # type: List[Text]
if request.method == 'POST':
form = FindMyTeamForm(request.POST)
if form.is_valid():
emails = form.cleaned_data['emails']
for user_profile in UserProfile.objects.filter(email__in=emails):
send_find_my_team_emails(user_profile)
# Note: Show all the emails in the result otherwise this
# feature can be used to ascertain which email addresses
# are associated with Zulip.
data = urllib.parse.urlencode({'emails': ','.join(emails)})
return redirect(url + "?" + data)
else:
form = FindMyTeamForm()
result = request.GET.get('emails')
if result:
for email in result.split(','):
try:
validators.validate_email(email)
emails.append(email)
except ValidationError:
pass
return render(request,
'zerver/find_my_team.html',
context={'form': form, 'current_url': lambda: url,
'emails': emails},)
|
apache-2.0
|
kenwmitchell/ansible-modules-core
|
files/stat.py
|
18
|
15327
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: stat
version_added: "1.3"
short_description: retrieve file or file system status
description:
- Retrieves facts for a file similar to the linux/unix 'stat' command.
options:
path:
description:
- The full path of the file/object to get the facts of
required: true
default: null
follow:
description:
- Whether to follow symlinks
required: false
default: no
get_md5:
description:
- Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems)
required: false
default: yes
get_checksum:
description:
- Whether to return a checksum of the file (default sha1)
required: false
default: yes
version_added: "1.8"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if the host is unable to use specified algorithm.
required: false
choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ]
default: sha1
aliases: [ 'checksum_algo', 'checksum' ]
version_added: "2.0"
mime:
description:
- Use file magic and return data about the nature of the file. this uses the 'file' utility found on most Linux/Unix systems.
- This will add both `mime_type` and 'charset' fields to the return, if possible.
required: false
choices: [ Yes, No ]
default: No
version_added: "2.1"
aliases: [ 'mime_type', 'mime-type' ]
author: "Bruce Pennypacker (@bpennypacker)"
'''
EXAMPLES = '''
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
- stat: path=/etc/foo.conf
register: st
- fail: msg="Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
# not exist, and we test sym.stat.islnk, it will fail with an error. We
# therefore must test whether it is defined.
# Run this to understand the structure, the skipped ones do not pass the
# check performed by 'when'
- stat: path=/path/to/something
register: sym
- debug: msg="islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
- debug: msg="islnk is defined (path must exist)"
when: sym.stat.islnk is defined
- debug: msg="Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
- debug: msg="Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat: path=/path/to/something
register: p
- debug: msg="Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no
# Use sha256 to calculate checksum
- stat: path=/path/to/something checksum_algorithm=sha256
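# Illustrative example, not from the original module docs: the mime option
# documented above requires the 'file' utility on the managed host
- stat: path=/path/to/something mime=yes
  register: f
- debug: msg="mime={{ f.stat.mime_type }} charset={{ f.stat.charset }}"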
'''
RETURN = '''
stat:
description: dictionary containing all the stat data
returned: success
type: dictionary
contains:
exists:
description: if the destination path actually exists or not
returned: success
type: boolean
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: string
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal
returned: success, path exists and user can read stats
type: octal
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: boolean
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: boolean
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: boolean
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: boolean
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: boolean
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: boolean
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: boolean
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
            description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: boolean
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
isuid:
description: Tells you if the invoking user's id matches the owner's id
returned: success, path exists and user can read stats
type: boolean
sample: False
isgid:
description: Tells you if the invoking user's group id matches the owner's group id
returned: success, path exists and user can read stats
type: boolean
sample: False
lnk_source:
description: Original path
returned: success, path exists and user can read stats and the path is a symbolic link
type: string
sample: /home/foobar/21102015-1445431274-908472971
md5:
description: md5 hash of the path
returned: success, path exists and user can read stats and path supports hashing and md5 is supported
type: string
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum_algorithm:
description: hash of the path
returned: success, path exists, user can read stats, path supports hashing and supplied checksum algorithm is available
type: string
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
aliases: ['checksum', 'checksum_algo']
pw_name:
description: User name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: www-data
mime_type:
description: file magic data or mime-type
returned: success, path exists and user can read stats and installed python supports it and the `mime` option was true, will return 'unknown' on error.
type: string
sample: PDF document, version 1.2
charset:
description: file character set or encoding
returned: success, path exists and user can read stats and installed python supports it and the `mime` option was true, will return 'unknown' on error.
type: string
sample: us-ascii
'''
import os
import sys
from stat import *
import pwd
import grp
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=True, type='path'),
follow = dict(default='no', type='bool'),
get_md5 = dict(default='yes', type='bool'),
get_checksum = dict(default='yes', type='bool'),
checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo', 'checksum']),
mime = dict(default=False, type='bool', aliases=['mime_type', 'mime-type']),
),
supports_check_mode = True
)
path = module.params.get('path')
follow = module.params.get('follow')
get_md5 = module.params.get('get_md5')
get_checksum = module.params.get('get_checksum')
checksum_algorithm = module.params.get('checksum_algorithm')
try:
if follow:
st = os.stat(path)
else:
st = os.lstat(path)
except OSError, e:
if e.errno == errno.ENOENT:
d = { 'exists' : False }
module.exit_json(changed=False, stat=d)
module.fail_json(msg = e.strerror)
mode = st.st_mode
# back to ansible
d = {
'exists' : True,
'path' : path,
'mode' : "%04o" % S_IMODE(mode),
'isdir' : S_ISDIR(mode),
'ischr' : S_ISCHR(mode),
'isblk' : S_ISBLK(mode),
'isreg' : S_ISREG(mode),
'isfifo' : S_ISFIFO(mode),
'islnk' : S_ISLNK(mode),
'issock' : S_ISSOCK(mode),
'uid' : st.st_uid,
'gid' : st.st_gid,
'size' : st.st_size,
'inode' : st.st_ino,
'dev' : st.st_dev,
'nlink' : st.st_nlink,
'atime' : st.st_atime,
'mtime' : st.st_mtime,
'ctime' : st.st_ctime,
'wusr' : bool(mode & stat.S_IWUSR),
'rusr' : bool(mode & stat.S_IRUSR),
'xusr' : bool(mode & stat.S_IXUSR),
'wgrp' : bool(mode & stat.S_IWGRP),
'rgrp' : bool(mode & stat.S_IRGRP),
'xgrp' : bool(mode & stat.S_IXGRP),
'woth' : bool(mode & stat.S_IWOTH),
'roth' : bool(mode & stat.S_IROTH),
'xoth' : bool(mode & stat.S_IXOTH),
'isuid' : bool(mode & stat.S_ISUID),
'isgid' : bool(mode & stat.S_ISGID),
}
if S_ISLNK(mode):
d['lnk_source'] = os.path.realpath(path)
if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK):
# Will fail on FIPS-140 compliant systems
try:
d['md5'] = module.md5(path)
except ValueError:
d['md5'] = None
if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK):
d['checksum'] = module.digest_from_file(path, checksum_algorithm)
try:
pw = pwd.getpwuid(st.st_uid)
d['pw_name'] = pw.pw_name
grp_info = grp.getgrgid(st.st_gid)
d['gr_name'] = grp_info.gr_name
except:
pass
if module.params.get('mime'):
d['mime_type'] = 'unknown'
d['charset'] = 'unknown'
filecmd = [module.get_bin_path('file', True),'-i', path]
try:
rc, out, err = module.run_command(filecmd)
if rc == 0:
mtype, chset = out.split(':')[1].split(';')
d['mime_type'] = mtype.strip()
d['charset'] = chset.split('=')[1].strip()
except:
pass
module.exit_json(changed=False, stat=d)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
zzzombat/lucid-python-django
|
django/conf/locale/hr/formats.py
|
232
|
1758
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
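# Illustrative note, not part of the original locale file: the input formats
# above are plain strftime patterns, so for example
#     datetime.datetime.strptime('25.10.2006.', '%d.%m.%Y.')
# yields datetime(2006, 10, 25), which is how Django form fields end up
# accepting the localized input when USE_L10N is enabled.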
|
bsd-3-clause
|
wri/gfw-api
|
lib/ee/element.py
|
4
|
3051
|
"""Base class for Image, Feature and Collection.
This class is never intended to be instantiated by the user.
"""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import apifunction
import computedobject
import ee_exception
class Element(computedobject.ComputedObject):
"""Base class for ImageCollection and FeatureCollection."""
_initialized = False
def __init__(self, func, args, opt_varName=None):
"""Constructs a collection by initializing its ComputedObject."""
super(Element, self).__init__(func, args, opt_varName)
@classmethod
def initialize(cls):
"""Imports API functions to this class."""
if not cls._initialized:
apifunction.ApiFunction.importApi(cls, 'Element', 'Element')
cls._initialized = True
@classmethod
def reset(cls):
"""Removes imported API functions from this class."""
apifunction.ApiFunction.clearApi(cls)
cls._initialized = False
@staticmethod
def name():
return 'Element'
def set(self, *args):
"""Overrides one or more metadata properties of an Element.
Args:
*args: Either a dictionary of properties, or a vararg sequence of
properties, e.g. key1, value1, key2, value2, ...
Returns:
The element with the specified properties overridden.
"""
if len(args) == 1:
properties = args[0]
# If this is a keyword call, unwrap it.
if (isinstance(properties, dict) and
properties.keys() == ['properties'] and
isinstance(properties['properties'],
(dict, computedobject.ComputedObject))):
# Looks like a call with keyword parameters. Extract them.
properties = properties['properties']
if isinstance(properties, dict):
# Still a plain object. Extract its keys. Setting the keys separately
# allows filter propagation.
result = self
for key, value in properties.iteritems():
result = apifunction.ApiFunction.call_(
'Element.set', result, key, value)
elif (isinstance(properties, computedobject.ComputedObject) and
apifunction.ApiFunction.lookupInternal('Element.setMulti')):
# A computed dictionary. Can't set each key separately.
result = apifunction.ApiFunction.call_(
'Element.setMulti', self, properties)
else:
raise ee_exception.EEException(
'When Element.set() is passed one argument, '
'it must be a dictionary.')
else:
# Interpret as key1, value1, key2, value2, ...
if len(args) % 2 != 0:
raise ee_exception.EEException(
'When Element.set() is passed multiple arguments, there '
'must be an even number of them.')
result = self
for i in range(0, len(args), 2):
key = args[i]
value = args[i + 1]
result = apifunction.ApiFunction.call_(
'Element.set', result, key, value)
# Manually cast the result to an image.
return self._cast(result)
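# Illustrative usage sketch, not part of this module: per the docstring above,
# set() accepts either a single dictionary or vararg key/value pairs, so for
# an ee.Image (Image subclasses Element) both calls below are equivalent:
#     img = img.set({'sensor': 'L8', 'cloud_cover': 12})
#     img = img.set('sensor', 'L8', 'cloud_cover', 12)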
|
gpl-2.0
|
SteveHNH/ansible
|
lib/ansible/modules/network/cloudengine/ce_snmp_community.py
|
27
|
33290
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_community
version_added: "2.4"
short_description: Manages SNMP community configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP community configuration on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
acl_number:
description:
- Access control list number.
required: false
default: null
community_name:
description:
- Unique name to identify the community.
required: false
default: null
access_right:
description:
- Access right read or write.
required: false
default: null
choices: ['read','write']
community_mib_view:
description:
- Mib view name.
required: false
default: null
group_name:
description:
- Unique name to identify the SNMPv3 group.
required: false
default: null
security_level:
description:
- Security level indicating whether to use authentication and encryption.
required: false
default: null
choices: ['noAuthNoPriv', 'authentication', 'privacy']
read_view:
description:
- Mib view name for read.
required: false
default: null
write_view:
description:
- Mib view name for write.
required: false
default: null
notify_view:
description:
- Mib view name for notification.
required: false
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp community test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP community"
ce_snmp_community:
state: present
community_name: Wdz123456789
access_right: write
provider: "{{ cli }}"
- name: "Undo SNMP community"
ce_snmp_community:
state: absent
community_name: Wdz123456789
access_right: write
provider: "{{ cli }}"
- name: "Config SNMP group"
ce_snmp_community:
state: present
group_name: wdz_group
security_level: noAuthNoPriv
acl_number: 2000
provider: "{{ cli }}"
- name: "Undo SNMP group"
ce_snmp_community:
state: absent
group_name: wdz_group
security_level: noAuthNoPriv
acl_number: 2000
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"acl_number": "2000", "group_name": "wdz_group",
"security_level": "noAuthNoPriv", "state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"snmp v3 group": {"snmp_group": ["wdz_group", "noAuthNoPriv", "2000"]}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent group v3 wdz_group noauthentication acl 2000"]
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec
# get snmp community
CE_GET_SNMP_COMMUNITY_HEADER = """
<filter type="subtree">
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community>
<communityName></communityName>
<accessRight></accessRight>
"""
CE_GET_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</filter>
"""
# merge snmp community
CE_MERGE_SNMP_COMMUNITY_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community operation="merge">
<communityName>%s</communityName>
<accessRight>%s</accessRight>
"""
CE_MERGE_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</config>
"""
# create snmp community
CE_CREATE_SNMP_COMMUNITY_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community operation="create">
<communityName>%s</communityName>
<accessRight>%s</accessRight>
"""
CE_CREATE_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</config>
"""
# delete snmp community
CE_DELETE_SNMP_COMMUNITY_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<communitys>
<community operation="delete">
<communityName>%s</communityName>
<accessRight>%s</accessRight>
"""
CE_DELETE_SNMP_COMMUNITY_TAIL = """
</community>
</communitys>
</snmp>
</config>
"""
# get snmp v3 group
CE_GET_SNMP_V3_GROUP_HEADER = """
<filter type="subtree">
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group>
<groupName></groupName>
<securityLevel></securityLevel>
"""
CE_GET_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</filter>
"""
# merge snmp v3 group
CE_MERGE_SNMP_V3_GROUP_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group operation="merge">
<groupName>%s</groupName>
<securityLevel>%s</securityLevel>
"""
CE_MERGE_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</config>
"""
# create snmp v3 group
CE_CREATE_SNMP_V3_GROUP_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group operation="create">
<groupName>%s</groupName>
<securityLevel>%s</securityLevel>
"""
CE_CREATE_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</config>
"""
# delete snmp v3 group
CE_DELETE_SNMP_V3_GROUP_HEADER = """
<config>
<snmp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<snmpv3Groups>
<snmpv3Group operation="delete">
<groupName>%s</groupName>
<securityLevel>%s</securityLevel>
"""
CE_DELETE_SNMP_V3_GROUP_TAIL = """
</snmpv3Group>
</snmpv3Groups>
</snmp>
</config>
"""
class SnmpCommunity(object):
""" Manages SNMP community configuration """
def netconf_get_config(self, **kwargs):
""" Get configure through netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" Set configure through netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
def check_snmp_community_args(self, **kwargs):
""" Check snmp community args """
module = kwargs["module"]
result = dict()
need_cfg = False
result["community_info"] = []
state = module.params['state']
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
if community_name and access_right:
if len(community_name) > 32 or len(community_name) == 0:
module.fail_json(
msg='Error: The len of community_name %s is out of [1 - 32].' % community_name)
if acl_number:
if acl_number.isdigit():
if int(acl_number) > 2999 or int(acl_number) < 2000:
module.fail_json(
msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number)
else:
if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1:
module.fail_json(
msg='Error: The len of acl_number %s is out of [1 - 32] or is invalid.' % acl_number)
if community_mib_view:
if len(community_mib_view) > 32 or len(community_mib_view) == 0:
module.fail_json(
msg='Error: The len of community_mib_view %s is out of [1 - 32].' % community_mib_view)
conf_str = CE_GET_SNMP_COMMUNITY_HEADER
if acl_number:
conf_str += "<aclNumber></aclNumber>"
if community_mib_view:
conf_str += "<mibViewName></mibViewName>"
conf_str += CE_GET_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
community_info = root.findall("data/snmp/communitys/community")
if community_info:
for tmp in community_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["communityName", "accessRight", "aclNumber", "mibViewName"]:
tmp_dict[site.tag] = site.text
result["community_info"].append(tmp_dict)
if result["community_info"]:
for tmp in result["community_info"]:
if "communityName" in tmp.keys():
need_cfg = True
if "accessRight" in tmp.keys():
if state == "present":
if tmp["accessRight"] != access_right:
need_cfg = True
else:
if tmp["accessRight"] == access_right:
need_cfg = True
if acl_number:
if "aclNumber" in tmp.keys():
if state == "present":
if tmp["aclNumber"] != acl_number:
need_cfg = True
else:
if tmp["aclNumber"] == acl_number:
need_cfg = True
if community_mib_view:
if "mibViewName" in tmp.keys():
if state == "present":
if tmp["mibViewName"] != community_mib_view:
need_cfg = True
else:
if tmp["mibViewName"] == community_mib_view:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_snmp_v3_group_args(self, **kwargs):
""" Check snmp v3 group args """
module = kwargs["module"]
result = dict()
need_cfg = False
result["group_info"] = []
state = module.params['state']
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
community_name = module.params['community_name']
access_right = module.params['access_right']
if group_name and security_level:
if community_name and access_right:
module.fail_json(
msg='Error: Community is used for v1/v2c, group_name is used for v3, do not '
'input at the same time.')
if len(group_name) > 32 or len(group_name) == 0:
module.fail_json(
msg='Error: The len of group_name %s is out of [1 - 32].' % group_name)
if acl_number:
if acl_number.isdigit():
if int(acl_number) > 2999 or int(acl_number) < 2000:
module.fail_json(
msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number)
else:
if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1:
module.fail_json(
msg='Error: The len of acl_number %s is out of [1 - 32] or is invalid.' % acl_number)
if read_view:
if len(read_view) > 32 or len(read_view) < 1:
module.fail_json(
msg='Error: The len of read_view %s is out of [1 - 32].' % read_view)
if write_view:
if len(write_view) > 32 or len(write_view) < 1:
module.fail_json(
msg='Error: The len of write_view %s is out of [1 - 32].' % write_view)
if notify_view:
if len(notify_view) > 32 or len(notify_view) < 1:
module.fail_json(
msg='Error: The len of notify_view %s is out of [1 - 32].' % notify_view)
conf_str = CE_GET_SNMP_V3_GROUP_HEADER
if acl_number:
conf_str += "<aclNumber></aclNumber>"
if read_view:
conf_str += "<readViewName></readViewName>"
if write_view:
conf_str += "<writeViewName></writeViewName>"
if notify_view:
conf_str += "<notifyViewName></notifyViewName>"
conf_str += CE_GET_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
group_info = root.findall("data/snmp/snmpv3Groups/snmpv3Group")
if group_info:
for tmp in group_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["groupName", "securityLevel", "readViewName", "writeViewName",
"notifyViewName", "aclNumber"]:
tmp_dict[site.tag] = site.text
result["group_info"].append(tmp_dict)
if result["group_info"]:
for tmp in result["group_info"]:
if "groupName" in tmp.keys():
if state == "present":
if tmp["groupName"] != group_name:
need_cfg = True
else:
if tmp["groupName"] == group_name:
need_cfg = True
if "securityLevel" in tmp.keys():
if state == "present":
if tmp["securityLevel"] != security_level:
need_cfg = True
else:
if tmp["securityLevel"] == security_level:
need_cfg = True
if acl_number:
if "aclNumber" in tmp.keys():
if state == "present":
if tmp["aclNumber"] != acl_number:
need_cfg = True
else:
if tmp["aclNumber"] == acl_number:
need_cfg = True
if read_view:
if "readViewName" in tmp.keys():
if state == "present":
if tmp["readViewName"] != read_view:
need_cfg = True
else:
if tmp["readViewName"] == read_view:
need_cfg = True
if write_view:
if "writeViewName" in tmp.keys():
if state == "present":
if tmp["writeViewName"] != write_view:
need_cfg = True
else:
if tmp["writeViewName"] == write_view:
need_cfg = True
if notify_view:
if "notifyViewName" in tmp.keys():
if state == "present":
if tmp["notifyViewName"] != notify_view:
need_cfg = True
else:
if tmp["notifyViewName"] == notify_view:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def merge_snmp_community(self, **kwargs):
""" Merge snmp community operation """
module = kwargs["module"]
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
conf_str = CE_MERGE_SNMP_COMMUNITY_HEADER % (
community_name, access_right)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if community_mib_view:
conf_str += "<mibViewName>%s</mibViewName>" % community_mib_view
conf_str += CE_MERGE_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge snmp community failed.')
community_safe_name = "******"
cmd = "snmp-agent community %s %s" % (access_right, community_safe_name)
if acl_number:
cmd += " acl %s" % acl_number
if community_mib_view:
cmd += " mib-view %s" % community_mib_view
return cmd
def create_snmp_community(self, **kwargs):
""" Create snmp community operation """
module = kwargs["module"]
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
conf_str = CE_CREATE_SNMP_COMMUNITY_HEADER % (
community_name, access_right)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if community_mib_view:
conf_str += "<mibViewName>%s</mibViewName>" % community_mib_view
conf_str += CE_CREATE_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Create snmp community failed.')
community_safe_name = "******"
cmd = "snmp-agent community %s %s" % (access_right, community_safe_name)
if acl_number:
cmd += " acl %s" % acl_number
if community_mib_view:
cmd += " mib-view %s" % community_mib_view
return cmd
def delete_snmp_community(self, **kwargs):
""" Delete snmp community operation """
module = kwargs["module"]
community_name = module.params['community_name']
access_right = module.params['access_right']
acl_number = module.params['acl_number']
community_mib_view = module.params['community_mib_view']
conf_str = CE_DELETE_SNMP_COMMUNITY_HEADER % (
community_name, access_right)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if community_mib_view:
conf_str += "<mibViewName>%s</mibViewName>" % community_mib_view
conf_str += CE_DELETE_SNMP_COMMUNITY_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
            module.fail_json(msg='Error: Delete snmp community failed.')
community_safe_name = "******"
cmd = "undo snmp-agent community %s %s" % (
access_right, community_safe_name)
return cmd
def merge_snmp_v3_group(self, **kwargs):
""" Merge snmp v3 group operation """
module = kwargs["module"]
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
conf_str = CE_MERGE_SNMP_V3_GROUP_HEADER % (group_name, security_level)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if read_view:
conf_str += "<readViewName>%s</readViewName>" % read_view
if write_view:
conf_str += "<writeViewName>%s</writeViewName>" % write_view
if notify_view:
conf_str += "<notifyViewName>%s</notifyViewName>" % notify_view
conf_str += CE_MERGE_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Merge snmp v3 group failed.')
if security_level == "noAuthNoPriv":
security_level_cli = "noauthentication"
elif security_level == "authentication":
security_level_cli = "authentication"
elif security_level == "privacy":
security_level_cli = "privacy"
cmd = "snmp-agent group v3 %s %s" % (group_name, security_level_cli)
if read_view:
cmd += " read-view %s" % read_view
if write_view:
cmd += " write-view %s" % write_view
if notify_view:
cmd += " notify-view %s" % notify_view
if acl_number:
cmd += " acl %s" % acl_number
return cmd
def create_snmp_v3_group(self, **kwargs):
""" Create snmp v3 group operation """
module = kwargs["module"]
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
conf_str = CE_CREATE_SNMP_V3_GROUP_HEADER % (
group_name, security_level)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if read_view:
conf_str += "<readViewName>%s</readViewName>" % read_view
if write_view:
conf_str += "<writeViewName>%s</writeViewName>" % write_view
if notify_view:
conf_str += "<notifyViewName>%s</notifyViewName>" % notify_view
conf_str += CE_CREATE_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Create snmp v3 group failed.')
if security_level == "noAuthNoPriv":
security_level_cli = "noauthentication"
elif security_level == "authentication":
security_level_cli = "authentication"
elif security_level == "privacy":
security_level_cli = "privacy"
cmd = "snmp-agent group v3 %s %s" % (group_name, security_level_cli)
if read_view:
cmd += " read-view %s" % read_view
if write_view:
cmd += " write-view %s" % write_view
if notify_view:
cmd += " notify-view %s" % notify_view
if acl_number:
cmd += " acl %s" % acl_number
return cmd
def delete_snmp_v3_group(self, **kwargs):
""" Delete snmp v3 group operation """
module = kwargs["module"]
group_name = module.params['group_name']
security_level = module.params['security_level']
acl_number = module.params['acl_number']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
conf_str = CE_DELETE_SNMP_V3_GROUP_HEADER % (
group_name, security_level)
if acl_number:
conf_str += "<aclNumber>%s</aclNumber>" % acl_number
if read_view:
conf_str += "<readViewName>%s</readViewName>" % read_view
if write_view:
conf_str += "<writeViewName>%s</writeViewName>" % write_view
if notify_view:
conf_str += "<notifyViewName>%s</notifyViewName>" % notify_view
conf_str += CE_DELETE_SNMP_V3_GROUP_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "<ok/>" not in recv_xml:
module.fail_json(msg='Error: Delete snmp v3 group failed.')
if security_level == "noAuthNoPriv":
security_level_cli = "noauthentication"
elif security_level == "authentication":
security_level_cli = "authentication"
elif security_level == "privacy":
security_level_cli = "privacy"
cmd = "undo snmp-agent group v3 %s %s" % (
group_name, security_level_cli)
return cmd
def main():
""" main function """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
acl_number=dict(type='str'),
community_name=dict(type='str', no_log=True),
access_right=dict(choices=['read', 'write']),
community_mib_view=dict(type='str'),
group_name=dict(type='str'),
security_level=dict(
choices=['noAuthNoPriv', 'authentication', 'privacy']),
read_view=dict(type='str'),
write_view=dict(type='str'),
notify_view=dict(type='str')
)
argument_spec.update(ce_argument_spec)
required_together = [("community_name", "access_right"), ("security_level", "group_name")]
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
supports_check_mode=True
)
changed = False
proposed = dict()
existing = dict()
end_state = dict()
updates = []
state = module.params['state']
acl_number = module.params['acl_number']
community_name = module.params['community_name']
community_mib_view = module.params['community_mib_view']
access_right = module.params['access_right']
group_name = module.params['group_name']
security_level = module.params['security_level']
read_view = module.params['read_view']
write_view = module.params['write_view']
notify_view = module.params['notify_view']
snmp_community_obj = SnmpCommunity()
if not snmp_community_obj:
module.fail_json(msg='Error: Init module failed.')
snmp_community_rst = snmp_community_obj.check_snmp_community_args(
module=module)
snmp_v3_group_rst = snmp_community_obj.check_snmp_v3_group_args(
module=module)
# get proposed
proposed["state"] = state
if acl_number:
proposed["acl_number"] = acl_number
if community_name:
proposed["community_name"] = community_name
if community_mib_view:
proposed["community_mib_view"] = community_mib_view
if access_right:
proposed["access_right"] = access_right
if group_name:
proposed["group_name"] = group_name
if security_level:
proposed["security_level"] = security_level
if read_view:
proposed["read_view"] = read_view
if write_view:
proposed["write_view"] = write_view
if notify_view:
proposed["notify_view"] = notify_view
# state exist snmp community config
exist_tmp = dict()
for item in snmp_community_rst:
if item != "need_cfg":
exist_tmp[item] = snmp_community_rst[item]
if exist_tmp:
existing["snmp community"] = exist_tmp
# state exist snmp v3 group config
exist_tmp = dict()
for item in snmp_v3_group_rst:
if item != "need_cfg":
exist_tmp[item] = snmp_v3_group_rst[item]
if exist_tmp:
existing["snmp v3 group"] = exist_tmp
if state == "present":
if snmp_community_rst["need_cfg"]:
if len(snmp_community_rst["community_info"]) != 0:
cmd = snmp_community_obj.merge_snmp_community(module=module)
changed = True
updates.append(cmd)
else:
cmd = snmp_community_obj.create_snmp_community(module=module)
changed = True
updates.append(cmd)
if snmp_v3_group_rst["need_cfg"]:
if len(snmp_v3_group_rst["group_info"]):
cmd = snmp_community_obj.merge_snmp_v3_group(module=module)
changed = True
updates.append(cmd)
else:
cmd = snmp_community_obj.create_snmp_v3_group(module=module)
changed = True
updates.append(cmd)
else:
if snmp_community_rst["need_cfg"]:
cmd = snmp_community_obj.delete_snmp_community(module=module)
changed = True
updates.append(cmd)
if snmp_v3_group_rst["need_cfg"]:
cmd = snmp_community_obj.delete_snmp_v3_group(module=module)
changed = True
updates.append(cmd)
# state end snmp community config
snmp_community_rst = snmp_community_obj.check_snmp_community_args(
module=module)
end_tmp = dict()
for item in snmp_community_rst:
if item != "need_cfg":
end_tmp[item] = snmp_community_rst[item]
if end_tmp:
end_state["snmp community"] = end_tmp
# state end snmp v3 group config
snmp_v3_group_rst = snmp_community_obj.check_snmp_v3_group_args(
module=module)
end_tmp = dict()
for item in snmp_v3_group_rst:
if item != "need_cfg":
end_tmp[item] = snmp_v3_group_rst[item]
if end_tmp:
end_state["snmp v3 group"] = end_tmp
results = dict()
results['proposed'] = proposed
results['existing'] = existing
results['changed'] = changed
results['end_state'] = end_state
results['updates'] = updates
module.exit_json(**results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
fuchao2012/ewcloud
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py
|
2767
|
2174
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
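# Illustrative example (not in the original file): with the patched writer, an
# attribute value such as "a\tb\r\n" is emitted as "a&#x9;b&#xD;&#xA;", so tabs,
# carriage returns and newlines survive a minidom round trip.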
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
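# Illustrative usage sketch (not part of the original file): patch minidom for the
# duration of a write and then restore the saved functions, e.g.
#
#   fix = XmlFix()
#   try:
#       doc.writexml(writer)  # uses the patched writexml/_write_data
#   finally:
#       fix.Cleanup()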
|
mit
|
leifos/treconomics
|
treconomics_project/treconomics/experiment_configuration.py
|
1
|
8405
|
__author__ = 'leif'
import os
import socket
import logging
import logging.config
import logging.handlers
from autocomplete_trie import AutocompleteTrie
from ifind.search.engines.whooshtrec import Whooshtrec
from experiment_setup import ExperimentSetup
work_dir = os.getcwd()
# when deployed this needs to match up with the hostname, and directory to where the project is
my_whoosh_doc_index_dir = os.path.join(work_dir, 'data/fullindex/')
if 'local' not in socket.gethostname():
my_whoosh_doc_index_dir = '/home/leifos/test500index'
#my_whoosh_doc_index_dir = '/Users/david/Workspace/indexes/aquaint_test500_whoosh'
my_whoosh_query_index_dir = os.path.join(work_dir, "/trec_query_index/index")
my_experiment_log_dir = work_dir
qrels_file = os.path.join(work_dir, "data/TREC2005.qrels.txt")
qrels_diversity_file = os.path.join(work_dir, "data/sigir-combined.diversity.qrels")
stopword_file = os.path.join(work_dir, "data/stopwords.txt")
data_dir = os.path.join(work_dir, "data")
print "Work DIR: " + work_dir
print "QRELS File: " + qrels_file
print "my_whoosh_doc_index_dir: " + my_whoosh_doc_index_dir
print "Stopword file: " + stopword_file
event_logger = logging.getLogger('event_log')
event_logger.setLevel(logging.INFO)
event_logger_handler = logging.FileHandler(os.path.join(my_experiment_log_dir, 'experiment.log'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
event_logger_handler.setFormatter(formatter)
event_logger.addHandler(event_logger_handler)
# workflow must always start with startexperiment/
exp_work_flows = [
['startexperiment/', 'consent', 'preexperiment/AN/',
'prepracticetask/0/', 'search/0/', 'postpracticetask/0/',
# 'anitatimeinstructions/TC/',
'anitapretasksurvey/1/', 'search/1/', 'anitaposttask0survey/1/',
'anitaposttask1survey/1/', 'anitaposttask2survey/1/',
'anitaposttask3survey/1/', 'taskspacer/',
'anitapretasksurvey/2/', 'search/2/', 'anitaposttask0survey/2/',
'anitaposttask1survey/2/', 'anitaposttask2survey/2/',
'anitaposttask3survey/2/', 'taskspacer/',
'anitapretasksurvey/3/', 'search/3/', 'anitaposttask0survey/3/',
'anitaposttask1survey/3/', 'anitaposttask2survey/3/',
'anitaposttask3survey/3/', 'taskspacer/',
'anitapretasksurvey/4/', 'search/4/', 'anitaposttask0survey/4/',
'anitaposttask1survey/4/', 'anitaposttask2survey/4/',
'anitaposttask3survey/4/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
['startexperiment/', 'consent', 'preexperiment/AN/',
'prepracticetask/0/', 'search/0/', 'postpracticetask/0/',
# 'anitatimeinstructions/NTC/',
'anitapretasksurvey/1/', 'search/1/', 'anitaposttask0survey/1/',
'anitaposttask1survey/1/', 'anitaposttask2survey/1/',
'anitaposttask3survey/1/', 'taskspacer/',
'anitapretasksurvey/2/', 'search/2/', 'anitaposttask0survey/2/',
'anitaposttask1survey/2/', 'anitaposttask2survey/2/',
'anitaposttask3survey/2/', 'taskspacer/',
'anitapretasksurvey/3/', 'search/3/', 'anitaposttask0survey/3/',
'anitaposttask1survey/3/', 'anitaposttask2survey/3/',
'anitaposttask3survey/3/', 'taskspacer/',
'anitapretasksurvey/4/', 'search/4/', 'anitaposttask0survey/4/',
'anitaposttask1survey/4/', 'anitaposttask2survey/4/',
'anitaposttask3survey/4/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
['startexperiment/', 'consent', 'preexperiment/AN/',
'anitaexit1survey/', 'anitaexit2survey/', 'anitaexit3survey/',
'anitademographicssurvey/', 'logout/'],
]
snippet_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/','taskspacer2/0/', 'search/0/', 'postpracticetask/0/', 'taskspacer',
'snippetpretask/1/','taskspacer2/1/', 'search/1/', 'snippetposttask/1/','systemsnippetposttask/1/',
'taskspacer',
'snippetpretask/2/', 'taskspacer2/2/','search/2/', 'snippetposttask/2/','systemsnippetposttask/2/',
'taskspacer',
'snippetpretask/3/','taskspacer2/3/', 'search/3/', 'snippetposttask/3/','systemsnippetposttask/3/',
'taskspacer',
'snippetpretask/4/','taskspacer2/4/', 'search/4/', 'snippetposttask/4/','systemsnippetposttask/4/',
'taskspacer', 'snippetexitsurvey/', 'performance/', 'endexperiment/',
'logout/'
]
diversity_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/', 'search/0/', 'diversityperformancepractice/', 'postpracticetask/0/', 'taskspacer/',
'snippetpretask/1/', 'taskspacerwithdetails/1/', 'search/1/', 'diversityposttask/1/','systemdiversityposttask/1/',
'taskspacer',
'snippetpretask/2/','taskspacerwithdetails/2/','search/2/', 'diversityposttask/2/','systemdiversityposttask/2/',
'taskspacer',
'snippetpretask/3/','taskspacerwithdetails/3/', 'search/3/', 'diversityposttask/3/','systemdiversityposttask/3/',
'taskspacer',
'snippetpretask/4/','taskspacerwithdetails/4/', 'search/4/', 'diversityposttask/4/','systemdiversityposttask/4/',
'taskspacer', 'diversityexitsurvey/', 'diversityperformance/', 'endexperiment/',
'logout/'
]
jaana_flow = [
'startexperiment/', 'preexperiment/UK/',
'demographicssurvey/',
'prepracticetask/0/','taskspacer2/0/', 'search/0/', 'postpracticetask/0/', 'taskspacer',
'snippetpretask/1/','taskspacer2/1/', 'search/1/', 'posttaskquestions/1/',
'taskspacer',
'snippetpretask/2/', 'taskspacer2/2/','search/2/', 'posttaskquestions/2/',
'taskspacer',
'performance/', 'endexperiment/',
'logout/'
]
test_flow = [
'startexperiment/', 'snippetexitsurvey/','snippetpretask/1/', 'snippetposttask/1/','systemsnippetposttask/1/',
'pretask/1/', 'search/1/','taskspacer',
'pretask/2/', 'search/2/','taskspacer',
'pretask/3/', 'search/3/',
'pretask/4/', 'search/4/','endexperiment/',
'logout/'
]
suggestion_trie = AutocompleteTrie(
min_occurrences=3,
suggestion_count=8,
include_stopwords=False,
stopwords_path=os.path.join(work_dir, "data/stopwords.txt"),
vocab_path=os.path.join(work_dir, "data/vocab.txt"),
vocab_trie_path=os.path.join(work_dir, "data/vocab_trie.dat"))
search_engine = Whooshtrec(
whoosh_index_dir=my_whoosh_doc_index_dir,
stopwords_file=stopword_file,
model=1,
newschema=True)
search_engine.key_name = 'bm25'
search_engine.set_fragmenter(frag_type=2, surround=30)
exp_chiir2016 = ExperimentSetup(
workflow= exp_work_flows[0],
engine=search_engine,
practice_topic='408',
topics=['347', '367', '435','354'],
rpp=10,
practice_interface=1,
practice_diversity = 4,
interface=[1, 1, 1, 1],
diversity=[4,4,4,4],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,600,600,600, 600],
delay_results = [0,5,0,5,0]
)
exp_sigir2017 = ExperimentSetup(
workflow=snippet_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '341', '435','408'],
rpp=10,
practice_interface=1,
interface=[1, 2, 3, 4],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,600,600,600, 600]) # 300s = 5min; 600s = 10min; 1200s = 20min
exp_jaana = ExperimentSetup(
workflow=jaana_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '435'],
rpp=10,
practice_interface=1,
interface=[1, 1],
rotation_type=1,
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
timeout=[150,1200,1200])
exp_sigir2018 = ExperimentSetup(
workflow=diversity_flow,
engine=search_engine,
practice_topic='367',
topics=['347', '341', '435','408'],
rpp=10,
practice_interface=1,
interface=[1, 1, 1, 1],
rotation_type=2,
practice_diversity=2,
diversity=[1,2,3,4],
description='standard condition bm25 test',
trie=suggestion_trie,
autocomplete=True,
target=4,
timeout=[10000, 10000, 10000, 10000, 10000]) # 300s = 5min; 600s = 10min; 1200s = 20min, 10000 to stop timeout events firing
# these correspond to conditions
experiment_setups = [exp_sigir2018, exp_jaana, exp_chiir2016]
|
mit
|
Nicop06/ansible
|
lib/ansible/modules/cloud/rackspace/rax_clb_nodes.py
|
19
|
8252
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_clb_nodes
short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
- Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
version_added: "1.4"
options:
address:
required: false
description:
- IP address or domain name of the node
condition:
required: false
choices:
- enabled
- disabled
- draining
description:
- Condition for the node, which determines its role within the load
balancer
load_balancer_id:
required: true
description:
- Load balancer id
node_id:
required: false
description:
- Node id
port:
required: false
description:
- Port number of the load balanced service on the node
state:
required: false
default: "present"
choices:
- present
- absent
description:
- Indicate desired state of the node
type:
required: false
choices:
- primary
- secondary
description:
- Type of node
wait:
required: false
default: "no"
choices:
- "yes"
- "no"
description:
- Wait for the load balancer to become active before returning
wait_timeout:
required: false
default: 30
description:
- How long to wait before giving up and returning an error
weight:
required: false
description:
- Weight of node
author: "Lukasz Kawczynski (@neuroid)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
# Add a new node to the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
address: 10.2.2.3
port: 80
condition: enabled
type: primary
wait: yes
credentials: /path/to/credentials
# Drain connections from a node
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
condition: draining
wait: yes
credentials: /path/to/credentials
# Remove a node from the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
state: absent
wait: yes
credentials: /path/to/credentials
'''
import os
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
def _activate_virtualenv(path):
path = os.path.expanduser(path)
activate_this = os.path.join(path, 'bin', 'activate_this.py')
with open(activate_this) as f:
code = compile(f.read(), activate_this, 'exec')
exec(code)
def _get_node(lb, node_id=None, address=None, port=None):
"""Return a matching node"""
for node in getattr(lb, 'nodes', []):
match_list = []
if node_id is not None:
match_list.append(getattr(node, 'id', None) == node_id)
if address is not None:
match_list.append(getattr(node, 'address', None) == address)
if port is not None:
match_list.append(getattr(node, 'port', None) == port)
if match_list and all(match_list):
return node
return None
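# Illustrative example (not part of the original module): _get_node(lb, address='10.2.2.3', port=80)
# returns the first node whose address and port both match, or None otherwise.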
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
address=dict(),
condition=dict(choices=['enabled', 'disabled', 'draining']),
load_balancer_id=dict(required=True, type='int'),
node_id=dict(type='int'),
port=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
type=dict(choices=['primary', 'secondary']),
virtualenv=dict(),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=30, type='int'),
weight=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
address = module.params['address']
condition = (module.params['condition'] and
module.params['condition'].upper())
load_balancer_id = module.params['load_balancer_id']
node_id = module.params['node_id']
port = module.params['port']
state = module.params['state']
typ = module.params['type'] and module.params['type'].upper()
virtualenv = module.params['virtualenv']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout'] or 1
weight = module.params['weight']
if virtualenv:
try:
_activate_virtualenv(virtualenv)
except IOError as e:
module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
virtualenv, e))
setup_rax_module(module, pyrax)
if not pyrax.cloud_loadbalancers:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
try:
lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
node = _get_node(lb, node_id, address, port)
result = rax_clb_node_to_dict(node)
if state == 'absent':
if not node: # Removing a non-existent node
module.exit_json(changed=False, state=state)
try:
lb.delete_node(node)
result = {}
except pyrax.exc.NotFound:
module.exit_json(changed=False, state=state)
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
else: # present
if not node:
if node_id: # Updating a non-existent node
msg = 'Node %d not found' % node_id
if lb.nodes:
msg += (' (available nodes: %s)' %
', '.join([str(x.id) for x in lb.nodes]))
module.fail_json(msg=msg)
else: # Creating a new node
try:
node = pyrax.cloudloadbalancers.Node(
address=address, port=port, condition=condition,
weight=weight, type=typ)
resp, body = lb.add_nodes([node])
result.update(body['nodes'][0])
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
else: # Updating an existing node
mutable = {
'condition': condition,
'type': typ,
'weight': weight,
}
# iterate over a copy because entries may be popped below
for name, value in list(mutable.items()):
if value is None or value == getattr(node, name):
mutable.pop(name)
if not mutable:
module.exit_json(changed=False, state=state, node=result)
try:
# The diff has to be set explicitly to update node's weight and
# type; this should probably be fixed in pyrax
lb.update_node(node, diff=mutable)
result.update(mutable)
except pyrax.exc.PyraxException as e:
module.fail_json(msg='%s' % e.message)
if wait:
pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
attempts=wait_timeout)
if lb.status != 'ACTIVE':
module.fail_json(
msg='Load balancer not active after %ds (current status: %s)' %
(wait_timeout, lb.status.lower()))
kwargs = {'node': result} if result else {}
module.exit_json(changed=True, state=state, **kwargs)
if __name__ == '__main__':
main()
|
gpl-3.0
|
hmsuorg/telegram-icecast2-bot
|
tice2bot/api/common/__init__.py
|
1
|
3752
|
""" Common libraries """
import socket
import json
import uuid
from urllib import request
from urllib.error import URLError
from config.bot_config import ICECAST2_SERVERS, ICECAST2_STATS_FILE, SERVERS_LIMIT, SSL
class RadioStream:
"""RadioStreams"""
def __init__(self):
"""__init__"""
self.__stream = None
self.__server = None
self.__online = 0
@property
def stream(self):
"""stream"""
return self.__stream
@stream.setter
def stream(self, stream):
"""
stream
:param stream
"""
self.__stream = stream
@property
def server(self):
"""server"""
return self.__server
@server.setter
def server(self, host):
"""
server
:param host
"""
self.__server = host
@property
def online(self):
"""online"""
return self.__online
@online.setter
def online(self, online):
self.__online = online
class CheckIceCast2Stats:
"""CheckIceCast2Stats"""
def __init__(self):
self.servers = []
def get_stats(self):
"""get_stats"""
for srv in ICECAST2_SERVERS:
try:
srv_request = request.urlopen('{}/{}'.format(srv, ICECAST2_STATS_FILE), None, 3)
except URLError:
# no stream or server is down
pass
else:
if srv_request.getcode() == 200:
stats_data = json.loads(srv_request.read().decode('utf-8'))
if 'icestats' in stats_data and 'source' in stats_data['icestats']:
# if the server limit reach SERVERS_LIMIT - 5, will not be listen as available
if stats_data['icestats']['source']['listeners'] >= SERVERS_LIMIT - 5:
continue
radio = RadioStream()
radio.stream = stats_data['icestats']['source']['listenurl']
if SSL is True and radio.stream.startswith('http:'):
radio.stream = radio.stream.replace('http', 'https')
radio.server = socket.gethostbyname(stats_data['icestats']['host'])
radio.online = stats_data['icestats']['source']['listeners']
self.servers.append(radio)
return self.servers
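# Assumed shape of the Icecast2 status JSON consumed above (illustrative only):
# {"icestats": {"host": "stream.example.org",
#               "source": {"listenurl": "http://stream.example.org:8000/live",
#                          "listeners": 42}}}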
class CommonCommands:
def get_streams(self, bot, update):
"""
get_streams
:param bot
:param update
"""
ice_stats = CheckIceCast2Stats()
stats = ice_stats.get_stats()
if not stats:
bot.sendMessage(chat_id=update.message.chat_id, text='There are no active streams')
return False
return stats
def get_user_data(self, bot, update):
"""
get_user_data
:param bot
:param update
"""
user_data = bot.get_chat(update.message.chat_id)
# if no username is set we cannot continue, so we just message back how to create one
if not user_data.username:
bot.sendMessage(
chat_id=update.message.chat_id, text="Please set your username ( telegram -> settings)"
)
bot.sendMessage(
chat_id=update.message.chat_id,
text="More info @ https://telegram.org/faq#q-what-are-usernames-how-do-i-get-one"
)
bot.sendMessage(
chat_id=update.message.chat_id, text="Note that, for this session was created a random username"
)
user_data.username = uuid.uuid4()
return user_data
|
mit
|
rdbhost/Rdbhdb
|
examples/ex9.py
|
1
|
2803
|
# Testing Exception Raising. Direct connection (not through API)
from urllib import urlencode
from urllib2 import Request, urlopen
import json
def postit(url, fields):
postdata = urlencode(fields)
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': str(len(postdata))}
r = Request(url, postdata, headers)
pg = urlopen(r)
text = pg.read()
return text
if __name__ == '__main__':
role = 's000015'
authcode = 'KF7IUQPlwfSth4sBvjdqqanHkojAZzEjMshrkfEV0O53yz6w6v'
url = 'http://www.rdbhost.com/db/'+role
q1 = '''CREATE TABLE accounts (
name varchar(20),
branch_name varchar(20),
balance real
);'''
q2 = '''CREATE TABLE branches (
name varchar(20),
balance real
);'''
q3='''INSERT INTO accounts (name, branch_name, balance)
VALUES ('Alice', 'University', 5473.0);'''
q4='''INSERT INTO accounts (name, branch_name, balance)
VALUES ('Bob', 'Mall', 1678.0);'''
q5='''INSERT INTO accounts (name, branch_name, balance)
VALUES ('Wally', 'Stadium', 25105.0);'''
q6='''INSERT INTO branches (name, balance)
VALUES ('University', 1000000.0);'''
q7='''INSERT INTO branches (name, balance)
VALUES ('Stadium', 1500000.0);'''
q8='''INSERT INTO branches (name, balance)
VALUES ('Mall', 2000000.0);'''
# Create tables accounts and branches and enter values
# Drop the tables created for this test
qs = [q1, q2, q3, q4, q5, q6, q7, q8]
for q in qs:
flds = [ ('q', q),
('format', 'xml'),
('authcode', authcode) ]
res = postit(url, flds)
print res
qa = '''BEGIN;'''
qb = '''UPDATE accounts SET balance = balance - 100.00 WHERE name = 'Alice';'''
qc = '''SAVEPOINT my_savepoint;'''
qd = '''UPDATE accounts SET balance = balance + 100.00 WHERE name = 'Bob';'''
qe = '''-- oops ... forget that and use Wally's account'''
qf = '''ROLLBACK TO my_savepoint;'''
qg = '''UPDATE accounts SET balance = balance + 100.00 WHERE name = 'Wally';'''
qh = '''COMMIT;'''
# Send multiple queries constituting a transaction with roll back
# in a single access
q = qa + qb + qc + qd + qe + qf + qg + qh
flds = [ ('q', q),
('format', 'xml'),
('authcode', authcode) ]
res = postit(url, flds)
print res
x1 = '''DROP TABLE accounts'''
x2 = '''DROP TABLE branches'''
# Drop the tables created for this test
qs = [x1, x2]
for q in qs:
flds = [ ('q', q),
('format', 'xml'),
('authcode', authcode) ]
res = postit(url, flds)
print res
|
mit
|
TimYi/pybuilder
|
src/unittest/python/plugins/python/sphinx_plugin_tests.py
|
6
|
5239
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from mock import Mock, patch
from logging import Logger
from pybuilder.core import Project, Author
from pybuilder.plugins.python.sphinx_plugin import (
assert_sphinx_is_available,
assert_sphinx_quickstart_is_available,
get_sphinx_build_command,
get_sphinx_quickstart_command,
initialize_sphinx_plugin)
class CheckSphinxAvailableTests(TestCase):
@patch('pybuilder.plugins.python.sphinx_plugin.assert_can_execute')
def test_should_check_that_sphinx_can_be_executed(self, mock_assert_can_execute):
mock_logger = Mock(Logger)
assert_sphinx_is_available(mock_logger)
expected_command_line = ['sphinx-build', '--version']
mock_assert_can_execute.assert_called_with(
expected_command_line, 'sphinx', 'plugin python.sphinx')
@patch('pybuilder.plugins.python.sphinx_plugin.assert_can_execute')
def test_should_check_that_sphinx_quickstart_can_be_executed(self, mock_assert_can_execute):
mock_logger = Mock(Logger)
assert_sphinx_quickstart_is_available(mock_logger)
expected_command_line = ['sphinx-quickstart', '--version']
mock_assert_can_execute.assert_called_with(
expected_command_line, 'sphinx', 'plugin python.sphinx')
class SphinxPluginInitializationTests(TestCase):
def setUp(self):
self.project = Project("basedir")
def test_should_leave_user_specified_properties_when_initializing_plugin(self):
expected_properties = {
"sphinx_source_dir": "source_dir",
"sphinx_output_dir": "output_dir",
"sphinx_config_path": "config_path",
"sphinx_doc_author": "author",
"sphinx_doc_builder": "doc_builder",
"sphinx_project_name": "project_name",
"sphinx_project_version": "project_version"
}
for property_name, property_value in expected_properties.items():
self.project.set_property(property_name, property_value)
initialize_sphinx_plugin(self.project)
for property_name, property_value in expected_properties.items():
self.assertEquals(
self.project.get_property(property_name),
property_value)
def test_should_set_default_values_when_initializing_plugin(self):
self.project.authors = [
Author("John Doe", "[email protected]"),
Author("Jane Doe", "[email protected]")]
initialize_sphinx_plugin(self.project)
self.project.set_property("sphinx_project_name", "foo")
self.project.set_property("sphinx_project_version", "1.0")
self.assertEquals(
self.project.get_property("sphinx_source_dir"), "docs")
self.assertEquals(
self.project.get_property("sphinx_output_dir"), "docs/_build/")
self.assertEquals(
self.project.get_property("sphinx_config_path"), "docs")
self.assertEquals(
self.project.get_property("sphinx_doc_author"), 'John Doe, Jane Doe')
self.assertEquals(
self.project.get_property("sphinx_doc_builder"), "html")
self.assertEquals(
self.project.get_property("sphinx_project_name"), "foo")
self.assertEquals(
self.project.get_property("sphinx_project_version"), "1.0")
class SphinxBuildCommandTests(TestCase):
def setUp(self):
self.project = Project("basedir")
def test_should_generate_sphinx_build_command_per_project_properties(self):
self.project.set_property("sphinx_config_path", "docs/")
self.project.set_property("sphinx_source_dir", "docs/")
self.project.set_property("sphinx_output_dir", "docs/_build/")
self.project.set_property("sphinx_doc_builder", 'JSONx')
sphinx_build_command = get_sphinx_build_command(self.project)
self.assertEqual(sphinx_build_command,
"sphinx-build -b JSONx basedir/docs/ basedir/docs/_build/")
def test_should_generate_sphinx_quickstart_command_with_project_properties(self):
self.project.set_property("sphinx_doc_author", "bar")
self.project.set_property("sphinx_project_name", "foo")
self.project.set_property("sphinx_project_version", "3")
self.project.set_property("sphinx_source_dir", "docs/")
sphinx_quickstart_command = get_sphinx_quickstart_command(self.project)
self.assertEqual(sphinx_quickstart_command,
"sphinx-quickstart -q -p 'foo' -a 'bar' -v 3 basedir/docs/")
|
apache-2.0
|
mitya57/django
|
django/utils/synch.py
|
49
|
2550
|
"""
Synchronization primitives:
- reader-writer lock (preference to writers)
(Contributed to Django by [email protected])
"""
import contextlib
import threading
class RWLock:
"""
Classic implementation of reader-writer lock with preference to writers.
Readers can access a resource simultaneously.
Writers get an exclusive access.
API is self-descriptive:
reader_enters()
reader_leaves()
writer_enters()
writer_leaves()
"""
def __init__(self):
self.mutex = threading.RLock()
self.can_read = threading.Semaphore(0)
self.can_write = threading.Semaphore(0)
self.active_readers = 0
self.active_writers = 0
self.waiting_readers = 0
self.waiting_writers = 0
def reader_enters(self):
with self.mutex:
if self.active_writers == 0 and self.waiting_writers == 0:
self.active_readers += 1
self.can_read.release()
else:
self.waiting_readers += 1
self.can_read.acquire()
def reader_leaves(self):
with self.mutex:
self.active_readers -= 1
if self.active_readers == 0 and self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
@contextlib.contextmanager
def reader(self):
self.reader_enters()
try:
yield
finally:
self.reader_leaves()
def writer_enters(self):
with self.mutex:
if self.active_writers == 0 and self.waiting_writers == 0 and self.active_readers == 0:
self.active_writers += 1
self.can_write.release()
else:
self.waiting_writers += 1
self.can_write.acquire()
def writer_leaves(self):
with self.mutex:
self.active_writers -= 1
if self.waiting_writers != 0:
self.active_writers += 1
self.waiting_writers -= 1
self.can_write.release()
elif self.waiting_readers != 0:
t = self.waiting_readers
self.waiting_readers = 0
self.active_readers += t
while t > 0:
self.can_read.release()
t -= 1
@contextlib.contextmanager
def writer(self):
self.writer_enters()
try:
yield
finally:
self.writer_leaves()
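# Illustrative usage sketch (not part of the original module):
#
#   lock = RWLock()
#   with lock.reader():
#       ...  # many readers may hold the lock at once
#   with lock.writer():
#       ...  # writers get exclusive access and take precedence over new readers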
|
bsd-3-clause
|
mshafiq9/django
|
django/core/serializers/__init__.py
|
347
|
8194
|
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.core.serializers.base import SerializerDoesNotExist
from django.utils import six
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml": "django.core.serializers.xml_serializer",
"python": "django.core.serializers.python",
"json": "django.core.serializers.json",
"yaml": "django.core.serializers.pyyaml",
}
_serializers = {}
class BadSerializer(object):
"""
Stub serializer to hold exception raised during registration
This allows the serializer registration to cache serializers and if there
is an error raised in the process of creating a serializer it will be
raised and passed along to the caller when the serializer is used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type('BadSerializerModule', (object,), {
'Deserializer': bad_serializer,
'Serializer': bad_serializer,
})
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in six.iteritems(_serializers) if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
def sort_dependencies(app_list):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
dependency has its dependencies serialized first.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, 'natural_key'):
deps = getattr(model.natural_key, 'dependencies', [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if field.remote_field:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.remote_field.through._meta.auto_created:
rel_model = field.remote_field.model
if hasattr(rel_model, 'natural_key') and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization list,
# then we've found another model with all its dependencies satisfied.
found = True
for candidate in ((d not in models or d in model_list) for d in deps):
if not candidate:
found = False
if found:
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
raise RuntimeError("Can't resolve dependencies for %s in serialized app list." %
', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
)
model_dependencies = skipped
return model_list
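# Illustrative example (not part of the original module): if Book declares
#   natural_key.dependencies = ['library.Author']
# then sort_dependencies() places Author before Book in the returned list, and a
# circular chain of such dependencies raises RuntimeError.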
|
bsd-3-clause
|
prune998/ansible
|
lib/ansible/modules/cloud/amazon/sns_topic.py
|
21
|
14058
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
module: sns_topic
short_description: Manages AWS SNS topics and subscriptions
description:
- The C(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
version_added: 2.0
author:
- "Joel Thompson (@joelthompson)"
- "Fernando Jose Pando (@nand0p)"
options:
name:
description:
- The name or ARN of the SNS topic to converge
required: True
state:
description:
- Whether to create or destroy an SNS topic
required: False
default: present
choices: ["absent", "present"]
display_name:
description:
- Display name of the topic
required: False
default: None
policy:
description:
- Policy to apply to the SNS topic
required: False
default: None
delivery_policy:
description:
- Delivery policy to apply to the SNS topic
required: False
default: None
subscriptions:
description:
- List of subscriptions to apply to the topic. Note that AWS requires
subscriptions to be confirmed, so you will need to confirm any new
subscriptions.
required: False
default: []
purge_subscriptions:
description:
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon."
required: False
default: True
extends_documentation_fragment: aws
requirements: [ "boto" ]
"""
EXAMPLES = """
- name: Create alarm SNS topic
sns_topic:
name: "alarms"
state: present
display_name: "alarm SNS topic"
delivery_policy:
http:
defaultHealthyRetryPolicy:
minDelayTarget: 2
maxDelayTarget: 4
numRetries: 3
numMaxDelayRetries: 5
backoffFunction: "<linear|arithmetic|geometric|exponential>"
disableSubscriptionOverrides: True
defaultThrottlePolicy:
maxReceivesPerSecond: 10
subscriptions:
- endpoint: "[email protected]"
protocol: "email"
- endpoint: "my_mobile_number"
protocol: "sms"
"""
RETURN = '''
sns_arn:
description: The ARN of the topic you are modifying
type: string
sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"
sns_topic:
description: Dict of sns topic details
type: dict
sample:
name: sns-topic-name
state: present
display_name: default
policy: {}
delivery_policy: {}
subscriptions_new: []
subscriptions_existing: []
subscriptions_deleted: []
subscriptions_added: []
subscriptions_purge: false
check_mode: false
topic_created: false
topic_deleted: false
attributes_set: []
'''
import time
import json
import re
try:
import boto.sns
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
class SnsTopicManager(object):
""" Handles SNS Topic creation and destruction """
def __init__(self,
module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode,
region,
**aws_connect_params):
self.region = region
self.aws_connect_params = aws_connect_params
self.connection = self._get_boto_connection()
self.changed = False
self.module = module
self.name = name
self.state = state
self.display_name = display_name
self.policy = policy
self.delivery_policy = delivery_policy
self.subscriptions = subscriptions
self.subscriptions_existing = []
self.subscriptions_deleted = []
self.subscriptions_added = []
self.purge_subscriptions = purge_subscriptions
self.check_mode = check_mode
self.topic_created = False
self.topic_deleted = False
self.arn_topic = None
self.attributes_set = []
def _get_boto_connection(self):
try:
return connect_to_aws(boto.sns, self.region,
**self.aws_connect_params)
except BotoServerError as err:
self.module.fail_json(msg=err.message)
def _get_all_topics(self):
next_token = None
topics = []
while True:
try:
response = self.connection.get_all_topics(next_token)
except BotoServerError as err:
self.module.fail_json(msg=err.message)
topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics'])
next_token = response['ListTopicsResponse']['ListTopicsResult']['NextToken']
if not next_token:
break
return [t['TopicArn'] for t in topics]
def _arn_topic_lookup(self):
# topic names cannot have colons, so this captures the full topic name
all_topics = self._get_all_topics()
lookup_topic = ':%s' % self.name
for topic in all_topics:
if topic.endswith(lookup_topic):
return topic
def _create_topic(self):
self.changed = True
self.topic_created = True
if not self.check_mode:
self.connection.create_topic(self.name)
self.arn_topic = self._arn_topic_lookup()
while not self.arn_topic:
time.sleep(3)
self.arn_topic = self._arn_topic_lookup()
def _set_topic_attrs(self):
topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
['Attributes']
if self.display_name and self.display_name != topic_attributes['DisplayName']:
self.changed = True
self.attributes_set.append('display_name')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
self.display_name)
if self.policy and self.policy != json.loads(topic_attributes['Policy']):
self.changed = True
self.attributes_set.append('policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'Policy',
json.dumps(self.policy))
if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
self.changed = True
self.attributes_set.append('delivery_policy')
if not self.check_mode:
self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
json.dumps(self.delivery_policy))
def _canonicalize_endpoint(self, protocol, endpoint):
if protocol == 'sms':
return re.sub('[^0-9]*', '', endpoint)
return endpoint
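# Illustrative example (not in the original module):
#   _canonicalize_endpoint('sms', '+1 (555) 010-0000') -> '15550100000'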
def _get_topic_subs(self):
next_token = None
while True:
response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
self.subscriptions_existing.extend(response['ListSubscriptionsByTopicResponse'] \
['ListSubscriptionsByTopicResult']['Subscriptions'])
next_token = response['ListSubscriptionsByTopicResponse'] \
['ListSubscriptionsByTopicResult']['NextToken']
if not next_token:
break
def _set_topic_subs(self):
subscriptions_existing_list = []
desired_subscriptions = [(sub['protocol'],
self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
self.subscriptions]
if self.subscriptions_existing:
for sub in self.subscriptions_existing:
sub_key = (sub['Protocol'], sub['Endpoint'])
subscriptions_existing_list.append(sub_key)
if (self.purge_subscriptions and sub_key not in desired_subscriptions and
sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted')):
self.changed = True
self.subscriptions_deleted.append(sub_key)
if not self.check_mode:
self.connection.unsubscribe(sub['SubscriptionArn'])
for (protocol, endpoint) in desired_subscriptions:
if (protocol, endpoint) not in subscriptions_existing_list:
self.changed = True
self.subscriptions_added.append((protocol, endpoint))
if not self.check_mode:
self.connection.subscribe(self.arn_topic, protocol, endpoint)
def _delete_subscriptions(self):
# NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
# https://forums.aws.amazon.com/thread.jspa?threadID=85993
for sub in self.subscriptions_existing:
if sub['SubscriptionArn'] not in ('PendingConfirmation', 'Deleted'):
self.subscriptions_deleted.append(sub['SubscriptionArn'])
self.changed = True
if not self.check_mode:
self.connection.unsubscribe(sub['SubscriptionArn'])
def _delete_topic(self):
self.topic_deleted = True
self.changed = True
if not self.check_mode:
self.connection.delete_topic(self.arn_topic)
def ensure_ok(self):
self.arn_topic = self._arn_topic_lookup()
if not self.arn_topic:
self._create_topic()
self._set_topic_attrs()
self._get_topic_subs()
self._set_topic_subs()
def ensure_gone(self):
self.arn_topic = self._arn_topic_lookup()
if self.arn_topic:
self._get_topic_subs()
if self.subscriptions_existing:
self._delete_subscriptions()
self._delete_topic()
def get_info(self):
info = {
'name': self.name,
'state': self.state,
'display_name': self.display_name,
'policy': self.policy,
'delivery_policy': self.delivery_policy,
'subscriptions_new': self.subscriptions,
'subscriptions_existing': self.subscriptions_existing,
'subscriptions_deleted': self.subscriptions_deleted,
'subscriptions_added': self.subscriptions_added,
'subscriptions_purge': self.purge_subscriptions,
'check_mode': self.check_mode,
'topic_created': self.topic_created,
'topic_deleted': self.topic_deleted,
'attributes_set': self.attributes_set
}
return info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present',
'absent']),
display_name=dict(type='str', required=False),
policy=dict(type='dict', required=False),
delivery_policy=dict(type='dict', required=False),
subscriptions=dict(default=[], type='list', required=False),
purge_subscriptions=dict(type='bool', default=True),
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params.get('name')
state = module.params.get('state')
display_name = module.params.get('display_name')
policy = module.params.get('policy')
delivery_policy = module.params.get('delivery_policy')
subscriptions = module.params.get('subscriptions')
purge_subscriptions = module.params.get('purge_subscriptions')
check_mode = module.check_mode
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="region must be specified")
sns_topic = SnsTopicManager(module,
name,
state,
display_name,
policy,
delivery_policy,
subscriptions,
purge_subscriptions,
check_mode,
region,
**aws_connect_params)
if state == 'present':
sns_topic.ensure_ok()
elif state == 'absent':
sns_topic.ensure_gone()
sns_facts = dict(changed=sns_topic.changed,
sns_arn=sns_topic.arn_topic,
sns_topic=sns_topic.get_info())
module.exit_json(**sns_facts)
if __name__ == '__main__':
main()
|
gpl-3.0
|
ssorgatem/pulsar
|
pulsar/managers/util/condor/__init__.py
|
4
|
3923
|
"""
Condor helper utilities.
"""
from subprocess import Popen, PIPE, STDOUT, check_call, CalledProcessError
from ..external import parse_external_id
DEFAULT_QUERY_CLASSAD = dict(
universe='vanilla',
getenv='true',
notification='NEVER',
)
PROBLEM_RUNNING_CONDOR_SUBMIT = \
"Problem encountered while running condor_submit."
PROBLEM_PARSING_EXTERNAL_ID = \
"Failed to find job id from condor_submit"
SUBMIT_PARAM_PREFIX = "submit_"
def submission_params(prefix=SUBMIT_PARAM_PREFIX, **kwds):
submission_params = {}
for key in kwds:
value = kwds[key]
key = key.lower()
if key.startswith(prefix):
condor_key = key[len(prefix):]
submission_params[condor_key] = value
return submission_params
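# Illustrative example (not part of the original module):
#   submission_params(submit_universe='docker', unrelated='x') -> {'universe': 'docker'}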
def build_submit_description(executable, output, error, user_log, query_params):
"""
Build up the contents of a condor submit description file.
>>> submit_args = dict(executable='/path/to/script', output='o', error='e', user_log='ul')
>>> submit_args['query_params'] = dict()
>>> default_description = build_submit_description(**submit_args)
>>> assert 'executable = /path/to/script' in default_description
>>> assert 'output = o' in default_description
>>> assert 'error = e' in default_description
>>> assert 'queue' in default_description
>>> assert 'universe = vanilla' in default_description
>>> assert 'universe = standard' not in default_description
>>> submit_args['query_params'] = dict(universe='standard')
>>> std_description = build_submit_description(**submit_args)
>>> assert 'universe = vanilla' not in std_description
>>> assert 'universe = standard' in std_description
"""
all_query_params = DEFAULT_QUERY_CLASSAD.copy()
all_query_params.update(query_params)
submit_description = []
for key, value in all_query_params.items():
submit_description.append('%s = %s' % (key, value))
submit_description.append('executable = ' + executable)
submit_description.append('output = ' + output)
submit_description.append('error = ' + error)
submit_description.append('log = ' + user_log)
submit_description.append('queue')
return '\n'.join(submit_description)
def condor_submit(submit_file):
"""
Submit a condor job described by the given file. Parse an external id for
the submission or return None and a reason for the failure.
"""
external_id = None
try:
submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
message, _ = submit.communicate()
if submit.returncode == 0:
external_id = parse_external_id(message, type='condor')
else:
message = PROBLEM_PARSING_EXTERNAL_ID
except Exception as e:
message = str(e)
return external_id, message
def condor_stop(external_id):
"""
Stop running condor job and return a failure_message if this
fails.
"""
failure_message = None
try:
check_call(('condor_rm', external_id))
except CalledProcessError:
failure_message = "condor_rm failed"
except Exception as e:
"error encountered calling condor_rm: %s" % e
return failure_message
def summarize_condor_log(log_file, external_id):
"""
"""
log_job_id = external_id.zfill(3)
s1 = s4 = s7 = s5 = s9 = False
with open(log_file, 'r') as log_handle:
for line in log_handle:
if '001 (' + log_job_id + '.' in line:
s1 = True
if '004 (' + log_job_id + '.' in line:
s4 = True
if '007 (' + log_job_id + '.' in line:
s7 = True
if '005 (' + log_job_id + '.' in line:
s5 = True
if '009 (' + log_job_id + '.' in line:
s9 = True
file_size = log_handle.tell()
return s1, s4, s7, s5, s9, file_size
|
apache-2.0
|
AladdinSonni/phantomjs
|
src/qt/qtwebkit/Source/ThirdParty/gtest/test/gtest_xml_outfiles_test.py
|
718
|
5312
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "[email protected] (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO([email protected]): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
# gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
bsd-3-clause
|
ArcherCraftStore/ArcherVMPeridot
|
nodechip/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py
|
486
|
15539
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name == '2013' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 non-Express has a x64-x86 cross that we want to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
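# Illustrative sketch only (not part of the original gyp module): one way a
# caller might run the command returned by SetupScript() and capture the
# resulting environment. The helper name _capture_msvc_environment and the
# parsing below are assumptions made for this example.
def _capture_msvc_environment(version, target_arch):
  """Run the MSVC setup script for |target_arch| and return its environment."""
  args = version.SetupScript(target_arch)
  # Chain the batch file with `set` so the configured environment is printed.
  cmd = ' '.join(args) + ' && set'
  popen = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
  output = popen.communicate()[0]
  env = {}
  for line in output.splitlines():
    if '=' in line:
      key, _, value = line.partition('=')
      env[key.upper()] = value
  return env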
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail when using 64-bit python, because it is a
virtual directory that 64-bit processes cannot see; in that case System32
already works correctly.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
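# For reference (an assumption about reg.exe's formatting, shown only to make
# the regular expression above concrete): a successful query prints roughly
#   HKEY_LOCAL_MACHINE\Software\Microsoft\VisualStudio\SxS\VC7
#       12.0    REG_SZ    C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\
# and the capture group picks up everything after the REG_* type token.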
def _RegistryKeyExists(key):
"""Use reg.exe to see if a key exists.
Args:
key: The registry key to check.
Return:
True if the key exists
"""
if not _RegistryQuery(key):
return False
return True
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that does not match a value in versions, python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-12 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto'):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('10.0', '12.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
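# Illustrative usage sketch (an assumption, not shipped with this module):
#   msvs = SelectVisualStudioVersion('auto')
#   print msvs.ShortName(), msvs.Description(), msvs.DefaultToolset()
# With GYP_MSVS_VERSION=2013 in the environment this would typically return the
# '2013' VisualStudioVersion, whose default toolset is 'v120'.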
|
apache-2.0
|
Houzz/luigi
|
luigi/target.py
|
1
|
12230
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Target` class.
It is a central concept of Luigi and represents the state of the workflow.
"""
import abc
import io
import os
import random
import tempfile
import logging
import warnings
from luigi import six
logger = logging.getLogger('luigi-interface')
@six.add_metaclass(abc.ABCMeta)
class Target(object):
"""
A Target is a resource generated by a :py:class:`~luigi.task.Task`.
For example, a Target might correspond to a file in HDFS or data in a database. The Target
interface defines one method that must be overridden: :py:meth:`exists`, which signifies if the
Target has been created or not.
Typically, a :py:class:`~luigi.task.Task` will define one or more Targets as output, and the Task
is considered complete if and only if each of its output Targets exist.
"""
@abc.abstractmethod
def exists(self):
"""
Returns ``True`` if the :py:class:`Target` exists and ``False`` otherwise.
"""
pass
class FileSystemException(Exception):
"""
Base class for generic file system exceptions.
"""
pass
class FileAlreadyExists(FileSystemException):
"""
Raised when a file system operation can't be performed because
a directory exists but is required to not exist.
"""
pass
class MissingParentDirectory(FileSystemException):
"""
Raised when a parent directory doesn't exist.
(Imagine mkdir without -p)
"""
pass
class NotADirectory(FileSystemException):
"""
Raised when a file system operation can't be performed because
an expected directory is actually a file.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class FileSystem(object):
"""
FileSystem abstraction used in conjunction with :py:class:`FileSystemTarget`.
Typically, a FileSystem is associated with instances of a :py:class:`FileSystemTarget`. The
instances of the py:class:`FileSystemTarget` will delegate methods such as
:py:meth:`FileSystemTarget.exists` and :py:meth:`FileSystemTarget.remove` to the FileSystem.
Methods of FileSystem raise :py:class:`FileSystemException` if there is a problem completing the
operation.
"""
@abc.abstractmethod
def exists(self, path):
"""
Return ``True`` if file or directory at ``path`` exist, ``False`` otherwise
:param str path: a path within the FileSystem to check for existence.
"""
pass
@abc.abstractmethod
def remove(self, path, recursive=True, skip_trash=True):
""" Remove file or directory at location ``path``
:param str path: a path within the FileSystem to remove.
:param bool recursive: if the path is a directory, recursively remove the directory and all
of its descendants. Defaults to ``True``.
"""
pass
def mkdir(self, path, parents=True, raise_if_exists=False):
"""
Create directory at location ``path``
Creates the directory at ``path`` and implicitly create parent
directories if they do not already exist.
:param str path: a path within the FileSystem to create as a directory.
:param bool parents: Create parent directories when necessary. When
parents=False and the parent directory doesn't
exist, raise luigi.target.MissingParentDirectory
:param bool raise_if_exists: raise luigi.target.FileAlreadyExists if
the folder already exists.
"""
raise NotImplementedError("mkdir() not implemented on {0}".format(self.__class__.__name__))
def isdir(self, path):
"""
Return ``True`` if the location at ``path`` is a directory. If not, return ``False``.
:param str path: a path within the FileSystem to check as a directory.
*Note*: This method is optional; not all FileSystem subclasses implement it.
"""
raise NotImplementedError("isdir() not implemented on {0}".format(self.__class__.__name__))
def listdir(self, path):
"""Return a list of files rooted in path.
This returns an iterable of the files rooted at ``path``. This is intended to be a
recursive listing.
:param str path: a path within the FileSystem to list.
*Note*: This method is optional; not all FileSystem subclasses implement it.
"""
raise NotImplementedError("listdir() not implemented on {0}".format(self.__class__.__name__))
def move(self, path, dest):
"""
Move a file, as one would expect.
"""
raise NotImplementedError("move() not implemented on {0}".format(self.__class__.__name__))
def rename_dont_move(self, path, dest):
"""
Potentially rename ``path`` to ``dest``, but don't move it into the
``dest`` folder (if it is a folder). This relates to :ref:`AtomicWrites`.
This method has a reasonable but not bullet proof default
implementation. It will just do ``move()`` if the file doesn't
``exists()`` already.
"""
warnings.warn("File system {} client doesn't support atomic mv.".format(self.__class__.__name__))
if self.exists(dest):
raise FileAlreadyExists()
self.move(path, dest)
def rename(self, *args, **kwargs):
"""
Alias for ``move()``
"""
self.move(*args, **kwargs)
def copy(self, path, dest):
"""
Copy a file or a directory with contents.
Currently, LocalFileSystem and MockFileSystem support only single file
copying but S3Client copies either a file or a directory as required.
"""
raise NotImplementedError("copy() not implemented on {0}".
format(self.__class__.__name__))
class FileSystemTarget(Target):
"""
Base class for FileSystem Targets like :class:`~luigi.local_target.LocalTarget` and :class:`~luigi.contrib.hdfs.HdfsTarget`.
A FileSystemTarget has an associated :py:class:`FileSystem` to which certain operations can be
delegated. By default, :py:meth:`exists` and :py:meth:`remove` are delegated to the
:py:class:`FileSystem`, which is determined by the :py:attr:`fs` property.
Methods of FileSystemTarget raise :py:class:`FileSystemException` if there is a problem
completing the operation.
"""
def __init__(self, path):
"""
Initializes a FileSystemTarget instance.
:param str path: the path associated with this FileSystemTarget.
"""
self.path = path
@abc.abstractproperty
def fs(self):
"""
The :py:class:`FileSystem` associated with this FileSystemTarget.
"""
raise NotImplementedError()
@abc.abstractmethod
def open(self, mode):
"""
Open the FileSystem target.
This method returns a file-like object which can either be read from or written to depending
on the specified mode.
:param str mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
open the FileSystemTarget in write mode. Subclasses can implement
additional options.
"""
pass
def exists(self):
"""
Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.
This method is implemented by using :py:attr:`fs`.
"""
path = self.path
if '*' in path or '?' in path or '[' in path or '{' in path:
logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; "
"override exists() to suppress the warning.", path)
return self.fs.exists(path)
def remove(self):
"""
Remove the resource at the path specified by this FileSystemTarget.
This method is implemented by using :py:attr:`fs`.
"""
self.fs.remove(self.path)
def temporary_path(self):
"""
A context manager that enables a reasonably short, general and
magic-less way to solve the :ref:`AtomicWrites`.
* On *entering*, it will create the parent directories so the
temporary_path is writeable right away.
This step uses :py:meth:`FileSystem.mkdir`.
* On *exiting*, it will move the temporary file if there was no exception thrown.
This step uses :py:meth:`FileSystem.rename_dont_move`
The file system operations will be carried out by calling them on :py:attr:`fs`.
The typical use case looks like this:
.. code:: python
class MyTask(luigi.Task):
def output(self):
return MyFileSystemTarget(...)
def run(self):
with self.output().temporary_path() as self.temp_output_path:
run_some_external_command(output_path=self.temp_output_path)
"""
class _Manager(object):
target = self
def __init__(self):
num = random.randrange(0, 1e10)
slashless_path = self.target.path.rstrip('/').rstrip("\\")
self._temp_path = '{}-luigi-tmp-{:010}{}'.format(
slashless_path,
num,
self.target._trailing_slash())
# TODO: os.path doesn't make sense here as it's os-dependent
tmp_dir = os.path.dirname(slashless_path)
if tmp_dir:
self.target.fs.mkdir(tmp_dir, parents=True, raise_if_exists=False)
def __enter__(self):
return self._temp_path
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
# There were no exceptions
self.target.fs.rename_dont_move(self._temp_path, self.target.path)
return False # False means we don't suppress the exception
return _Manager()
def _touchz(self):
with self.open('w'):
pass
def _trailing_slash(self):
# I suppose one day schema-like paths, like
# file:///path/blah.txt?params=etc can be parsed too
return self.path[-1] if self.path[-1] in r'\/' else ''
class AtomicLocalFile(io.BufferedWriter):
"""Abstract class to create a Target that creates
a temporary file in the local filesystem before
moving it to its final destination.
This class is just for the writing part of the Target. See
:class:`luigi.local_target.LocalTarget` for example
"""
def __init__(self, path):
self.__tmp_path = self.generate_tmp_path(path)
self.path = path
super(AtomicLocalFile, self).__init__(io.FileIO(self.__tmp_path, 'w'))
def close(self):
super(AtomicLocalFile, self).close()
self.move_to_final_destination()
def generate_tmp_path(self, path):
return os.path.join(tempfile.gettempdir(), 'luigi-s3-tmp-%09d' % random.randrange(0, 1e10))
def move_to_final_destination(self):
raise NotImplementedError()
def __del__(self):
if os.path.exists(self.tmp_path):
os.remove(self.tmp_path)
@property
def tmp_path(self):
return self.__tmp_path
def __exit__(self, exc_type, exc, traceback):
" Close/commit the file if there are no exception "
if exc_type:
return
return super(AtomicLocalFile, self).__exit__(exc_type, exc, traceback)
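# Minimal illustrative sketch (not part of luigi itself; the class names below
# are invented for this example): how a FileSystem and a FileSystemTarget pair
# up, with exists() and remove() delegated through the ``fs`` property.
class _InMemoryFileSystem(FileSystem):
    """Toy FileSystem that tracks existing paths in a plain set."""

    def __init__(self):
        self._paths = set()

    def exists(self, path):
        return path in self._paths

    def remove(self, path, recursive=True, skip_trash=True):
        self._paths.discard(path)


class _InMemoryTarget(FileSystemTarget):
    """Toy Target backed by a single shared _InMemoryFileSystem."""

    _shared_fs = _InMemoryFileSystem()

    @property
    def fs(self):
        return self._shared_fs

    def open(self, mode):
        raise NotImplementedError("open() is out of scope for this sketch")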
|
apache-2.0
|
gamechanger/dusty
|
dusty/systems/nfs/server.py
|
1
|
4327
|
from __future__ import absolute_import
import logging
import os
from subprocess import CalledProcessError
from ... import constants
from .. import config_file
from ...compiler.spec_assembler import get_all_repos
from ...log import log_to_client
from ...source import Repo
from ..virtualbox import get_docker_vm_ip
from ...subprocess import check_and_log_output_and_error, check_output, check_call
def configure_nfs_server():
"""
This function is used with `dusty up`. It will check all active repos to see if
they are exported. If any are missing, it will replace current dusty exports with
exports that are needed for currently active repos, and restart
the nfs server
"""
repos_for_export = get_all_repos(active_only=True, include_specs_repo=False)
current_exports = _get_current_exports()
needed_exports = _get_exports_for_repos(repos_for_export)
_ensure_managed_repos_dir_exists()
if not needed_exports.difference(current_exports):
if not _server_is_running():
_restart_server()
return
_write_exports_config(needed_exports)
_restart_server()
def add_exports_for_repos(repos):
"""
This function will add needed entries to /etc/exports. It will not remove any
entries from the file. It will then restart the server if necessary
"""
current_exports = _get_current_exports()
needed_exports = _get_exports_for_repos(repos)
if not needed_exports.difference(current_exports):
if not _server_is_running():
_restart_server()
return
_write_exports_config(current_exports.union(needed_exports))
_restart_server()
def _ensure_managed_repos_dir_exists():
"""
Our exports file will be invalid if this folder doesn't exist, and the NFS server
will not run correctly.
"""
if not os.path.exists(constants.REPOS_DIR):
os.makedirs(constants.REPOS_DIR)
def _get_exports_for_repos(repos):
config_set = set([_export_for_dusty_managed()])
for repo in repos:
if not repo.is_overridden:
continue
config_set.add(_export_for_repo(repo))
return config_set
def _write_exports_config(exports_set):
exports_config = ''.join(exports_set)
log_to_client("Your /etc/exports file is missing a configuration that dusty expects, so we will try and update the file to look like this:")
log_to_client("------------")
log_to_client(exports_config)
log_to_client("------------")
log_to_client("If the write fails, you might try manually updating your /etc/exports file to include the above lines.")
current_config = _read_exports_contents()
current_config = config_file.remove_current_dusty_config(current_config)
current_config += config_file.create_config_section(exports_config)
config_file.write(constants.EXPORTS_PATH, current_config)
def _export_for_dusty_managed():
return '{} {} -alldirs -maproot=0:0\n'.format(os.path.realpath(constants.REPOS_DIR), get_docker_vm_ip())
def _export_for_repo(repo):
return '{} {} -alldirs -maproot={}\n'.format(os.path.realpath(repo.local_path), get_docker_vm_ip(), _maproot_for_repo(repo))
def _maproot_for_repo(repo):
stat = os.stat(repo.local_path)
return '{}:{}'.format(stat.st_uid, stat.st_gid)
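# For context (paths and addresses below are illustrative assumptions): the two
# helpers above emit /etc/exports entries of the form
#   /Users/me/.dusty/repos 192.168.59.103 -alldirs -maproot=0:0
#   /Users/me/src/my-repo 192.168.59.103 -alldirs -maproot=501:20
# one line per export, terminated by the trailing newline in the format strings.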
def _check_exports():
try:
check_and_log_output_and_error(['nfsd', 'checkexports'], demote=False)
except CalledProcessError:
log_to_client('There\'s a conflict in your /etc/exports file - check existing configuration there and remove conflicts.')
log_to_client('`nfsd checkexports` will verify that this file is valid.')
raise
def _restart_server():
_check_exports()
if _server_is_running():
check_call(['nfsd', 'update'], demote=False)
else:
log_to_client('Restarting NFS Server')
check_and_log_output_and_error(['nfsd', 'restart'], demote=False)
def _read_exports_contents():
if os.path.isfile(constants.EXPORTS_PATH):
return config_file.read(constants.EXPORTS_PATH)
else:
return ''
def _get_current_exports():
dusty_config = config_file.get_dusty_config_section(_read_exports_contents())
return set(dusty_config.splitlines(True))
def _server_is_running():
return 'nfsd is running' in check_output(['nfsd', 'status'])
|
mit
|
grangier/python-goose
|
tests/extractors/links.py
|
15
|
1177
|
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from base import TestExtractionBase
class TestArticleLinks(TestExtractionBase):
def test_links(self):
article = self.getArticle()
number_links = len(article.links)
expected_number_links = self.data['expected']['links']
self.assertEqual(number_links, expected_number_links)
|
apache-2.0
|
XuGuohui/firmware
|
hal/src/photon/libraries/crypto/micro-ecc/emk_project.py
|
47
|
4375
|
import os
c, link, asm, utils = emk.module("c", "link", "asm", "utils")
default_compile_flags = ["-fvisibility=hidden", "-Wall", "-Wextra", "-Wshadow", "-Werror", "-Wno-missing-field-initializers", "-Wno-unused-parameter", \
"-Wno-comment", "-Wno-unused", "-Wno-unknown-pragmas"]
default_link_flags = []
opt_flags = {"dbg":["-g"], "std":["-O2"], "max":["-O3"], "small":["-Os"]}
opt_link_flags = {"dbg":[], "std":[], "max":[], "small":[]}
c_flags = ["-std=c99"]
cxx_flags = ["-std=c++11", "-Wno-reorder", "-fno-rtti", "-fno-exceptions"]
c_link_flags = []
cxx_link_flags = ["-fno-rtti", "-fno-exceptions"]
def setup_build_dir():
build_arch = None
if "arch" in emk.options:
build_arch = emk.options["arch"]
elif not emk.cleaning:
build_arch = "osx"
emk.options["arch"] = build_arch
opt_level = None
if "opt" in emk.options:
level = emk.options["opt"]
if level in opt_flags:
opt_level = level
else:
emk.log.warning("Unknown optimization level '%s'" % (level))
elif not emk.cleaning:
opt_level = "dbg"
emk.options["opt"] = opt_level
dirs = ["__build__"]
if build_arch:
dirs.append(build_arch)
if opt_level:
dirs.append(opt_level)
emk.build_dir = os.path.join(*dirs)
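# Worked example for setup_build_dir (derived from the code above): with the
# options arch=avr and opt=small, dirs becomes ["__build__", "avr", "small"]
# and emk.build_dir ends up as "__build__/avr/small"; when cleaning with no
# options set, it is just "__build__".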
def setup_osx():
global c
global link
flags = [("-arch", "x86_64"), "-fno-common", "-Wnewline-eof"]
c.flags.extend(flags)
c.cxx.flags += ["-stdlib=libc++"]
link.cxx.flags += ["-stdlib=libc++"]
link_flags = [("-arch", "x86_64")]
link.local_flags.extend(link_flags)
def setup_avr():
global c
global link
c.compiler = c.GccCompiler("/Projects/avr-tools/bin/avr-")
c.flags += ["-mmcu=atmega256rfr2", "-ffunction-sections", "-fdata-sections"]
link.linker = link.GccLinker("/Projects/avr-tools/bin/avr-")
link.flags += ["-mmcu=atmega256rfr2", "-mrelax", "-Wl,--gc-sections"]
link.strip = True
def setup_arm_thumb():
global c
global link
global asm
global utils
asm.assembler = asm.GccAssembler("/cross/arm_cortex/bin/arm-none-eabi-")
c.compiler = c.GccCompiler("/cross/arm_cortex/bin/arm-none-eabi-")
link.linker = link.GccLinker("/cross/arm_cortex/bin/arm-none-eabi-")
c.flags.extend(["-mcpu=cortex-m0", "-mthumb", "-ffunction-sections", "-fdata-sections", "-fno-builtin-fprintf", "-fno-builtin-printf"])
c.defines["LPC11XX"] = 1
link.local_flags.extend(["-mcpu=cortex-m0", "-mthumb", "-nostartfiles", "-nostdlib", "-Wl,--gc-sections"])
link.local_flags.extend(["-Tflash.lds", "-L/Projects/lpc11xx/core", "/Projects/lpc11xx/core/" + emk.build_dir + "/board_cstartup.o"])
link.local_syslibs += ["gcc"]
link.depdirs += ["/Projects/lpc11xx/stdlib"]
def do_objcopy(produces, requires):
utils.call("/cross/arm_cortex/bin/arm-none-eabi-objcopy", "-O", "binary", requires[0], produces[0])
def handle_exe(path):
emk.depend(path, "/Projects/lpc11xx/core/" + emk.build_dir + "/board_cstartup.o")
emk.rule(do_objcopy, path + ".bin", path, cwd_safe=True, ex_safe=True)
emk.autobuild(path + ".bin")
link.exe_funcs.append(handle_exe)
link.strip = True
emk.recurse("/Projects/lpc11xx/core")
def setup_linux_rpi():
global c
global link
c.compiler = c.GccCompiler("/Volumes/xtools/arm-none-linux-gnueabi/bin/arm-none-linux-gnueabi-")
link.linker = link.GccLinker("/Volumes/xtools/arm-none-linux-gnueabi/bin/arm-none-linux-gnueabi-")
c.flags.extend(["-fomit-frame-pointer"])
setup_build_dir()
setup_funcs = {"osx":setup_osx, "avr":setup_avr, "arm_thumb":setup_arm_thumb, "rpi": setup_linux_rpi}
if not emk.cleaning:
build_arch = emk.options["arch"]
opt_level = emk.options["opt"]
c.flags.extend(default_compile_flags)
c.flags.extend(opt_flags[opt_level])
c.c.flags.extend(c_flags)
c.cxx.flags.extend(cxx_flags)
link.local_flags.extend(default_link_flags)
link.local_flags.extend(opt_link_flags[opt_level])
link.c.local_flags.extend(c_link_flags)
link.cxx.local_flags.extend(cxx_link_flags)
c.include_dirs.append("$:proj:$")
if build_arch in setup_funcs:
setup_funcs[build_arch]()
else:
raise emk.BuildError("Unknown target arch '%s'" % (build_arch))
c.defines["TARGET_ARCH_" + build_arch.upper()] = 1
|
gpl-3.0
|
Antiun/odoo
|
addons/marketing_campaign/report/__init__.py
|
441
|
1071
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import campaign_analysis
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/eggs/boto-2.27.0-py2.7.egg/boto/file/connection.py
|
111
|
1480
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# File representation of connection, for use with "file://" URIs.
from bucket import Bucket
class FileConnection(object):
def __init__(self, file_storage_uri):
# FileConnections are per-file storage URI.
self.file_storage_uri = file_storage_uri
def get_bucket(self, bucket_name, validate=True, headers=None):
return Bucket(bucket_name, self.file_storage_uri.object_name)
|
gpl-3.0
|
druuu/django
|
django/conf/urls/__init__.py
|
264
|
4592
|
import warnings
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import (
LocaleRegexURLResolver, RegexURLPattern, RegexURLResolver,
)
from django.utils import six
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
__all__ = ['handler400', 'handler403', 'handler404', 'handler500', 'include', 'patterns', 'url']
handler400 = 'django.views.defaults.bad_request'
handler403 = 'django.views.defaults.permission_denied'
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
if app_name and not namespace:
raise ValueError('Must specify a namespace if specifying app_name.')
if app_name:
warnings.warn(
'The app_name argument to django.conf.urls.include() is deprecated. '
'Set the app_name in the included URLconf instead.',
RemovedInDjango20Warning, stacklevel=2
)
if isinstance(arg, tuple):
# callable returning a namespace hint
try:
urlconf_module, app_name = arg
except ValueError:
if namespace:
raise ImproperlyConfigured(
'Cannot override the namespace for a dynamic module that provides a namespace'
)
warnings.warn(
'Passing a 3-tuple to django.conf.urls.include() is deprecated. '
'Pass a 2-tuple containing the list of patterns and app_name, '
'and provide the namespace argument to include() instead.',
RemovedInDjango20Warning, stacklevel=2
)
urlconf_module, app_name, namespace = arg
else:
# No namespace hint - use manually provided namespace
urlconf_module = arg
if isinstance(urlconf_module, six.string_types):
urlconf_module = import_module(urlconf_module)
patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module)
app_name = getattr(urlconf_module, 'app_name', app_name)
if namespace and not app_name:
warnings.warn(
'Specifying a namespace in django.conf.urls.include() without '
'providing an app_name is deprecated. Set the app_name attribute '
'in the included module, or pass a 2-tuple containing the list of '
'patterns and app_name instead.',
RemovedInDjango20Warning, stacklevel=2
)
namespace = namespace or app_name
# Make sure we can iterate through the patterns (without this, some
# testcases will break).
if isinstance(patterns, (list, tuple)):
for url_pattern in patterns:
# Test if the LocaleRegexURLResolver is used within the include;
# this should throw an error since this is not allowed!
if isinstance(url_pattern, LocaleRegexURLResolver):
raise ImproperlyConfigured(
'Using i18n_patterns in an included URLconf is not allowed.')
return (urlconf_module, app_name, namespace)
def patterns(prefix, *args):
warnings.warn(
'django.conf.urls.patterns() is deprecated and will be removed in '
'Django 1.10. Update your urlpatterns to be a list of '
'django.conf.urls.url() instances instead.',
RemovedInDjango110Warning, stacklevel=2
)
pattern_list = []
for t in args:
if isinstance(t, (list, tuple)):
t = url(prefix=prefix, *t)
elif isinstance(t, RegexURLPattern):
t.add_prefix(prefix)
pattern_list.append(t)
return pattern_list
def url(regex, view, kwargs=None, name=None, prefix=''):
if isinstance(view, (list, tuple)):
# For include(...) processing.
urlconf_module, app_name, namespace = view
return RegexURLResolver(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
else:
if isinstance(view, six.string_types):
warnings.warn(
'Support for string view arguments to url() is deprecated and '
'will be removed in Django 1.10 (got %s). Pass the callable '
'instead.' % view,
RemovedInDjango110Warning, stacklevel=2
)
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return RegexURLPattern(regex, view, kwargs, name)
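# Illustrative usage sketch (an assumption, not part of this module): a typical
# URLconf built from the helpers above. The module path and view name are
# placeholders.
#   from django.conf.urls import include, url
#
#   urlpatterns = [
#       url(r'^articles/', include('articles.urls')),
#       url(r'^$', home_view, name='home'),
#   ]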
|
bsd-3-clause
|
learningequality/kolibri
|
kolibri/core/public/api.py
|
1
|
9585
|
import datetime
import gzip
import io
import json
import time
from django.db.models import Q
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import HttpResponseNotFound
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.gzip import gzip_page
from morango.models.core import TransferSession
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .. import error_constants
from .constants.user_sync_statuses import QUEUED
from .constants.user_sync_statuses import SYNC
from .utils import get_device_info
from .utils import get_device_setting
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import LocalFile
from kolibri.core.content.serializers import PublicChannelSerializer
from kolibri.core.content.utils.file_availability import generate_checksum_integer_mask
from kolibri.core.device.models import SyncQueue
from kolibri.core.device.utils import allow_peer_unlisted_channel_import
MAX_CONCURRENT_SYNCS = 1
HANDSHAKING_TIME = 5
class InfoViewSet(viewsets.ViewSet):
"""
An equivalent endpoint in studio which allows kolibri devices to know
if this device can serve content.
Spec doc: https://docs.google.com/document/d/1XKXQe25sf9Tht6uIXvqb3T40KeY3BLkkexcV08wvR9M/edit#
"""
def list(self, request):
"""Returns metadata information about the device"""
return Response(get_device_info())
def _get_channel_list(version, params, identifier=None):
if version == "v1":
return _get_channel_list_v1(params, identifier=identifier)
else:
raise LookupError()
def _get_channel_list_v1(params, identifier=None):
keyword = params.get("keyword", "").strip()
language_id = params.get("language", "").strip()
channels = None
if identifier:
channels = ChannelMetadata.objects.filter(pk=identifier)
else:
channels = ChannelMetadata.objects.all()
if keyword != "":
channels = channels.filter(
Q(name__icontains=keyword) | Q(description__icontains=keyword)
)
if language_id != "":
matching_tree_ids = (
ContentNode.objects.prefetch_related("files")
.filter(
Q(lang__id__icontains=language_id)
| Q(files__lang__id__icontains=language_id)
)
.values_list("tree_id", flat=True)
)
channels = channels.filter(
Q(root__lang__id__icontains=language_id)
| Q(root__tree_id__in=matching_tree_ids)
)
if not allow_peer_unlisted_channel_import():
channels = channels.exclude(public=False)
return channels.filter(root__available=True).distinct()
@api_view(["GET"])
def get_public_channel_list(request, version):
""" Endpoint: /public/<version>/channels/?=<query params> """
try:
channel_list = _get_channel_list(version, request.query_params)
except LookupError:
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
return HttpResponse(
json.dumps(PublicChannelSerializer(channel_list, many=True).data),
content_type="application/json",
)
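# Illustrative request (query values are assumptions): a peer asking this
# device for public maths channels in English would issue
#   GET /public/v1/channels/?keyword=math&language=en
# and receive the list serialized by PublicChannelSerializer, filtered by
# _get_channel_list_v1 above.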
@api_view(["GET"])
def get_public_channel_lookup(request, version, identifier):
""" Endpoint: /public/<version>/channels/lookup/<identifier> """
try:
channel_list = _get_channel_list(
version,
request.query_params,
identifier=identifier.strip().replace("-", ""),
)
except LookupError:
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
if not channel_list.exists():
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
return HttpResponse(
json.dumps(PublicChannelSerializer(channel_list, many=True).data),
content_type="application/json",
)
@csrf_exempt
@gzip_page
def get_public_file_checksums(request, version):
""" Endpoint: /public/<version>/file_checksums/ """
if version == "v1":
if request.content_type == "application/json":
data = request.body
elif request.content_type == "application/gzip":
with gzip.GzipFile(fileobj=io.BytesIO(request.body)) as f:
data = f.read()
else:
return HttpResponseBadRequest("POST body must be either json or gzip")
checksums = json.loads(data.decode("utf-8"))
available_checksums = set(
LocalFile.objects.filter(available=True)
.filter_by_uuids(checksums)
.values_list("id", flat=True)
.distinct()
)
return HttpResponse(
generate_checksum_integer_mask(checksums, available_checksums),
content_type="application/octet-stream",
)
return HttpResponseNotFound(
json.dumps({"id": error_constants.NOT_FOUND, "metadata": {"view": ""}}),
content_type="application/json",
)
def position_in_queue(id):
try:
client_time = SyncQueue.objects.get(pk=id)
before_client = SyncQueue.objects.filter(datetime__lt=client_time.datetime)
pos = before_client.count()
except SyncQueue.DoesNotExist:
pos = 0 # in case the queue is empty
return pos
class SyncQueueViewSet(viewsets.ViewSet):
def list(self, request):
"""Returns length of the queue for each of the available facilities"""
SyncQueue.clean_stale() # first, ensure no expired devices remain in the queue
facilities = Facility.objects.all()
queue = {}
for facility in facilities:
queue[facility.id] = SyncQueue.objects.filter(
user__facility=facility
).count()
return Response(queue)
def check_queue(self, pk=None):
last_activity = datetime.datetime.now() - datetime.timedelta(minutes=5)
current_transfers = TransferSession.objects.filter(
active=True, last_activity_timestamp__gte=last_activity
).count()
if current_transfers < MAX_CONCURRENT_SYNCS:
allow_sync = True
data = {"action": SYNC}
else:
# polling time is at least HANDSHAKING_TIME seconds per position in the queue,
# so that it stays greater than the time needed for the handshake part of the ssl protocol
if pk is not None:
# if updating the element let's assign
# its time depending on its position in the queue
polling = HANDSHAKING_TIME * (
MAX_CONCURRENT_SYNCS + position_in_queue(pk)
)
else:
polling = HANDSHAKING_TIME * (
MAX_CONCURRENT_SYNCS + SyncQueue.objects.all().count()
)
data = {
"action": QUEUED,
"keep_alive": polling,
}
allow_sync = False
return (allow_sync, data)
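# Worked example of the polling arithmetic above (illustrative): with
# MAX_CONCURRENT_SYNCS = 1 and HANDSHAKING_TIME = 5, a device with two devices
# ahead of it (position_in_queue == 2) is told to poll again in
# 5 * (1 + 2) = 15 seconds, while a brand new request with four devices already
# queued waits 5 * (1 + 4) = 25 seconds.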
def create(self, request):
SyncQueue.clean_stale() # first, ensure no expired devices remain in the queue
is_SoUD = get_device_setting("subset_of_users_device", False)
if is_SoUD:
content = {"I'm a Subset of users device": "Nothing to do here"}
# would love to use HTTP 418, but it's not fully usable in browsers
return Response(content, status=status.HTTP_400_BAD_REQUEST)
user = request.data.get("user") or request.query_params.get("user")
if user is None:
content = {"Missing parameter": "User is required"}
return Response(content, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
if not FacilityUser.objects.filter(id=user).exists():
content = {"This user is not registered in any of this server facilities"}
return Response(content, status=status.HTTP_404_NOT_FOUND)
allow_sync, data = self.check_queue()
if not allow_sync:
element, _ = SyncQueue.objects.get_or_create(
user_id=user,
keep_alive=data["keep_alive"],
)
data["id"] = element.id
return Response(data)
def update(self, request, pk=None):
SyncQueue.clean_stale() # first, ensure no expired devices remain in the queue
allow_sync, data = self.check_queue(pk)
if not allow_sync:
element = SyncQueue.objects.filter(id=pk).first()
if not element:
# this device has been deleted from the queue, likely due to keep alive expiration
content = {
"Missing element": "This device is not registered in any of this server facilities"
}
return Response(content, status=status.HTTP_404_NOT_FOUND)
element.keep_alive = data["keep_alive"]
element.updated = time.time()
element.save()
data["id"] = element.id
else:
SyncQueue.objects.filter(id=pk).delete()
return Response(data)
|
mit
|
boar/boar
|
boar/articles/feeds.py
|
1
|
4136
|
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed, add_domain
from django.utils.feedgenerator import Atom1Feed
from boar.articles.models import Article, Section, PodcastMetadata, Tag
ITEMS = 30
class ArticlesFeed(Feed):
title = 'The Boar'
link = '/'
description = "The University of Warwick Students' Newspaper"
description_template = 'articles/feed_articles_description.html'
feed_type = Atom1Feed
def __init__(self, featured=False, *args, **kwargs):
self.featured = featured
return super(ArticlesFeed, self).__init__(*args, **kwargs)
def title(self):
return u'The Boar%s' % (self.featured and ' (featured)' or '')
def _add_podcast_metadata(self, qs):
# TODO: 1.2 allows reverse select_related
items = []
for item in qs:
try:
item.podcast_metadata
except PodcastMetadata.DoesNotExist:
item._podcast_metadata_cache = None
items.append(item)
return items
def items(self):
qs = Article.objects.filter(published=True)
if self.featured:
qs = qs.filter(featured=True)
return self._add_podcast_metadata(qs[:ITEMS])
def item_title(self, item):
return item.title
def item_author_name(self, obj):
return u', '.join([a.get_full_name() for a in obj.authors.all()])
def item_author_link(self, obj):
if obj.authors.count() == 1:
return add_domain(Site.objects.get_current().domain, obj.authors.all()[0].get_absolute_url())
def item_pubdate(self, obj):
return obj.pub_date
def item_categories(self, obj):
return [t.name for t in obj.tags.all()]
def item_enclosure_url(self, obj):
if obj.podcast_metadata:
return obj.podcast_metadata.podcast.url
def item_enclosure_length(self, obj):
if obj.podcast_metadata:
return obj.podcast_metadata.size
item_enclosure_mime_type = "audio/mpeg"
class SectionFeed(ArticlesFeed):
def title(self, obj):
return u'The Boar: %s%s' % (obj.title, self.featured and ' (featured)' or '')
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return 'The latest from the %s section.' % obj.title.lower()
def get_object(self, request, slug):
return Section.objects.get(slug=slug)
def items(self, obj):
qs = Article.objects.filter(published=True, section=obj)
if self.featured:
qs = qs.filter(featured=True)
return self._add_podcast_metadata(qs[:ITEMS])
class TopicFeed(ArticlesFeed):
def title(self, obj):
return u'The Boar: %s: %s' % (obj['section'].title, obj['topic'])
def link(self, obj):
return u'/%s/%s/' % (obj['section'].slug, obj['topic'].slug)
def description(self, obj):
return u'The latest on "%s" from the %s section.' % (obj['topic'], obj['section'].title.lower())
def get_object(self, request, section_slug, topic_slug):
return {'topic': Tag.objects.get(slug=topic_slug), 'section': Section.objects.get(slug=section_slug)}
def items(self, obj):
return self._add_podcast_metadata(
Article.objects.filter(
published=True,
section=obj['section'],
tags__in=[obj['topic']]
)[:ITEMS]
)
class UserFeed(ArticlesFeed):
def title(self, obj):
return u'The Boar: %s' % obj.get_full_name()
def link(self, obj):
return obj.get_absolute_url()
def description(self, obj):
return u'The latest writing from %s.' % obj.get_full_name()
def get_object(self, request, slug):
return User.objects.get(username=slug)
def items(self, obj):
return self._add_podcast_metadata(
obj.article_set.filter(published=True)[:ITEMS]
)
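# Illustrative wiring sketch (an assumption, not part of this file): these Feed
# classes are typically exposed as views in a URLconf, for example
#   url(r'^feeds/articles/$', ArticlesFeed()),
#   url(r'^feeds/section/(?P<slug>[\w-]+)/$', SectionFeed()),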
|
bsd-3-clause
|
ossdemura/django-miniblog
|
src/Lib/site-packages/pip/_vendor/html5lib/_tokenizer.py
|
385
|
76580
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import unichr as chr
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
We do our usually processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
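# Worked example for consumeNumberEntity (illustrative): for the input "&#x41;"
# the consumed characters are ['4', '1'], charAsInt becomes 0x41, and
# chr(0x41) == 'A' is returned. A supplementary-plane reference such as
# "&#128512;" (0x1F600) takes the ValueError branch on UCS-2 builds and comes
# back as the surrogate pair '\ud83d\ude00'.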
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
(allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process we might have a named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of ¬i for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, _ in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
|
mit
|
unitusdev/unitus
|
contrib/seeds/generate-seeds.py
|
55
|
4341
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
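# Illustrative example (not part of the original script): an input line of
# "1.2.3.4:8333" would be emitted roughly as
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}
# i.e. the IPv4 address embedded in an IPv4-mapped IPv6 address, plus the port.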
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
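        # Illustrative example: '::1' expands to fifteen zero bytes followed by 0x01.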
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8333)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18333)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
mit
|
aclements/mtrace
|
mtrace-tools/crud/mtracepy/util.py
|
2
|
1615
|
import sqlite3
import hashlib
import sys
mtrace_label_heap = 1
mtrace_label_block = 2
mtrace_label_static = 3
mtrace_label_percpu = 4
mtrace_label_str = { mtrace_label_heap : 'heap',
mtrace_label_block : 'block',
mtrace_label_static : 'static',
mtrace_label_percpu : 'percpu' }
# XXX there must be a better way..
def uhex(i):
return (i & 0xffffffffffffffff)
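# Illustrative example: uhex(-1) == 0xffffffffffffffff.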
def checksum(fileName, maxBytes = sys.maxint):
f = open(fileName,"rb")
m = hashlib.md5()
while True:
bc = min(maxBytes, 256)
bytes = f.read(bc)
if bytes == '':
break
m.update(bytes)
maxBytes -= bc
if maxBytes == 0:
break
f.close()
return m.digest()
def apply_filters(lst, filters, removed=None):
if len(filters) > 0:
lst2 = []
for e in lst:
lst2.append(e)
for f in filters:
if f.filter(e) == False:
if removed != None:
removed.append(e)
lst2.pop()
break
return lst2
else:
return lst
class MtraceDB:
def __init__(self, dbFile):
self.dbFile = dbFile
self.conn = sqlite3.connect(self.dbFile)
def exec_single(self, query):
c = self.conn.cursor()
c.execute(query)
rs = c.fetchall()
if len(rs) != 1:
raise Exception('%s returned %u rows' % (query, len(rs)))
r = rs[0]
c.close()
return r
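# Illustrative usage sketch (not part of the original module); the database
# path and table name are hypothetical:
#   db = MtraceDB('mtrace.db')
#   (count,) = db.exec_single('SELECT COUNT(*) FROM labels')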
|
gpl-2.0
|
MiseCZ/apila
|
tasks/Task.py
|
2
|
1396
|
class Task(object):
CHANGED = 'changed'
CREATED = 'created'
def __init__(self, name, params, config, register, when, tags, unknown_attributes):
self.name = name
self.params = params
self.config = config
self.unknown_attributes = unknown_attributes
self.register = register
self.when = when
self.tags = tags
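  # Concrete Task subclasses are expected to define `task_name`, `known_params`,
  # `required_params` and `required_configs`; validate() below relies on them.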
def validate(self, errors):
for param in self.unknown_attributes:
errors.append( "In task '%(name)s' %(context)s unknown param %(param)s'" % { 'name': self.task_name, 'param': param, 'context': str({'name': self.name, self.task_name: self.params}) })
for param in self.params:
if param not in self.known_params:
errors.append( "In task '%(name)s' %(context)s unknown param '%(name)s.%(param)s'" % { 'name': self.task_name, 'param': param, 'context': str({'name': self.name, self.task_name: self.params}) })
for param in self.required_params:
if param not in self.params:
errors.append( "In task '%(name)s' %(context)s missing param '%(name)s.%(param)s'" % { 'name': self.task_name, 'param': param, 'context': str({'name': self.name, self.task_name: self.params}) })
for conf in self.required_configs:
if conf not in self.config:
errors.append( "Task '%(name)s' need field '%(conf)s' in config.yml" % { 'name': self.task_name, 'conf': conf})
def need_context(self):
return False
|
mit
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/contrib/predictor/saved_model_predictor.py
|
55
|
6579
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Predictor` constructed from a `SavedModel`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from tensorflow.contrib.predictor import predictor
from tensorflow.contrib.saved_model.python.saved_model import reader
from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import signature_constants
DEFAULT_TAGS = 'serve'
_DEFAULT_INPUT_ALTERNATIVE_FORMAT = 'default_input_alternative:{}'
def get_meta_graph_def(saved_model_dir, tags):
"""Gets `MetaGraphDef` from a directory containing a `SavedModel`.
Returns the `MetaGraphDef` for the given tag-set and SavedModel directory.
Args:
saved_model_dir: Directory containing the SavedModel.
tags: Comma separated list of tags used to identify the correct
`MetaGraphDef`.
Raises:
ValueError: An error when the given tags cannot be found.
Returns:
A `MetaGraphDef` corresponding to the given tags.
"""
saved_model = reader.read_saved_model(saved_model_dir)
set_of_tags = set([tag.strip() for tag in tags.split(',')])
for meta_graph_def in saved_model.meta_graphs:
if set(meta_graph_def.meta_info_def.tags) == set_of_tags:
return meta_graph_def
raise ValueError('Could not find MetaGraphDef with tags {}'.format(tags))
def _get_signature_def(signature_def_key, export_dir, tags):
"""Construct a `SignatureDef` proto."""
signature_def_key = (
signature_def_key or
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
metagraph_def = get_meta_graph_def(export_dir, tags)
try:
signature_def = signature_def_utils.get_signature_def_by_key(
metagraph_def,
signature_def_key)
except ValueError as e:
try:
formatted_key = _DEFAULT_INPUT_ALTERNATIVE_FORMAT.format(
signature_def_key)
signature_def = signature_def_utils.get_signature_def_by_key(
metagraph_def, formatted_key)
logging.warning('Could not find signature def "%s". '
'Using "%s" instead', signature_def_key, formatted_key)
except ValueError:
raise ValueError(
'Got signature_def_key "{}". Available signatures are {}. '
'Original error:\n{}'.format(
signature_def_key, list(metagraph_def.signature_def), e))
return signature_def
def _check_signature_arguments(signature_def_key,
signature_def,
input_names,
output_names):
"""Validates signature arguments for `SavedModelPredictor`."""
signature_def_key_specified = signature_def_key is not None
signature_def_specified = signature_def is not None
input_names_specified = input_names is not None
output_names_specified = output_names is not None
if input_names_specified != output_names_specified:
raise ValueError(
'input_names and output_names must both be specified or both be '
'unspecified.'
)
if (signature_def_key_specified + signature_def_specified +
input_names_specified > 1):
raise ValueError(
        'You must specify at most one of signature_def_key OR signature_def OR '
'(input_names AND output_names).'
)
class SavedModelPredictor(predictor.Predictor):
"""A `Predictor` constructed from a `SavedModel`."""
def __init__(self,
export_dir,
signature_def_key=None,
signature_def=None,
input_names=None,
output_names=None,
tags=None,
graph=None):
"""Initialize a `CoreEstimatorPredictor`.
Args:
export_dir: a path to a directory containing a `SavedModel`.
signature_def_key: Optional string specifying the signature to use. If
`None`, then `DEFAULT_SERVING_SIGNATURE_DEF_KEY` is used. Only one of
`signature_def_key` and `signature_def` should be specified.
signature_def: A `SignatureDef` proto specifying the inputs and outputs
for prediction. Only one of `signature_def_key` and `signature_def`
should be specified.
input_names: A dictionary mapping strings to `Tensor`s in the `SavedModel`
that represent the input. The keys can be any string of the user's
choosing.
output_names: A dictionary mapping strings to `Tensor`s in the
`SavedModel` that represent the output. The keys can be any string of
the user's choosing.
tags: Optional. Comma separated list of tags that will be used to retrieve
the correct `SignatureDef`. Defaults to `DEFAULT_TAGS`.
graph: Optional. The Tensorflow `graph` in which prediction should be
done.
Raises:
ValueError: If more than one of signature_def_key OR signature_def OR
(input_names AND output_names) is specified.
"""
_check_signature_arguments(
signature_def_key, signature_def, input_names, output_names)
tags = tags or DEFAULT_TAGS
self._graph = graph or ops.Graph()
with self._graph.as_default():
self._session = session.Session()
loader.load(self._session, tags.split(','), export_dir)
if input_names is None:
if signature_def is None:
signature_def = _get_signature_def(signature_def_key, export_dir, tags)
input_names = {k: v.name for k, v in signature_def.inputs.items()}
output_names = {k: v.name for k, v in signature_def.outputs.items()}
self._feed_tensors = {k: self._graph.get_tensor_by_name(v)
for k, v in input_names.items()}
self._fetch_tensors = {k: self._graph.get_tensor_by_name(v)
for k, v in output_names.items()}
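# Illustrative usage sketch (not part of this module; the export path and the
# feed key below are hypothetical). A constructed predictor is called with a
# dict of input values and returns a dict of output values, as provided by the
# `Predictor` base class:
#
#   predictor = SavedModelPredictor(export_dir='/tmp/my_saved_model')
#   outputs = predictor({'inputs': [[1.0, 2.0, 3.0]]})
#   print(outputs)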
|
apache-2.0
|
playfulgod/android_kernel_lge_kk_zee
|
scripts/gcc-wrapper.py
|
1276
|
3382
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:62",
])
# Capture the name of the object file, so we can remove it if a warning is found.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
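# Illustrative only (the diagnostic line below is hypothetical): warning_re
# captures the "file.ext:line" portion of a gcc warning so interpret_warning()
# can compare it against allowed_warnings, e.g.
#
#   >>> warning_re.match("drivers/foo.c:10:5: warning: unused variable 'x'").group(2)
#   'foo.c:10'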
|
gpl-2.0
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/py-lockfile/package.py
|
5
|
2151
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyLockfile(PythonPackage):
"""The lockfile package exports a LockFile class which provides a
simple API for locking files. Unlike the Windows msvcrt.locking
function, the fcntl.lockf and flock functions, and the
deprecated posixfile module, the API is identical across both
Unix (including Linux and Mac) and Windows platforms. The lock
mechanism relies on the atomic nature of the link (on Unix) and
mkdir (on Windows) system calls. An implementation based on
SQLite is also provided, more as a demonstration of the
possibilities it provides than as production-quality code.
"""
homepage = "https://pypi.python.org/pypi/lockfile"
url = "https://pypi.io/packages/source/l/lockfile/lockfile-0.10.2.tar.gz"
version('0.10.2', '1aa6175a6d57f082cd12e7ac6102ab15')
depends_on("py-setuptools", type='build')
|
lgpl-2.1
|
cortedeltimo/SickRage
|
lib/github/tests/OrganizationHasInMembers.py
|
7
|
2192
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2016 Sam Corbett <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
class OrganizationHasInMembers(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.user = self.g.get_user("meneal")
self.org = self.g.get_organization("RobotWithFeelings")
self.has_in_members = self.org.has_in_members(self.user)
def testHasInMembers(self):
self.assertTrue(self.has_in_members)
|
gpl-3.0
|
V155/qutebrowser
|
qutebrowser/keyinput/modeparsers.py
|
1
|
12376
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""KeyChainParser for "hint" and "normal" modes.
Module attributes:
STARTCHARS: Possible chars for starting a commandline input.
"""
import traceback
import enum
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5.QtGui import QKeySequence
from qutebrowser.commands import runners, cmdexc
from qutebrowser.config import config
from qutebrowser.keyinput import basekeyparser, keyutils
from qutebrowser.utils import usertypes, log, message, objreg, utils
STARTCHARS = ":/?"
LastPress = enum.Enum('LastPress', ['none', 'filtertext', 'keystring'])
class CommandKeyParser(basekeyparser.BaseKeyParser):
"""KeyChainParser for command bindings.
Attributes:
_commandrunner: CommandRunner instance.
"""
def __init__(self, win_id, parent=None, supports_count=None):
super().__init__(win_id, parent, supports_count)
self._commandrunner = runners.CommandRunner(win_id)
def execute(self, cmdstr, count=None):
try:
self._commandrunner.run(cmdstr, count)
except cmdexc.Error as e:
message.error(str(e), stack=traceback.format_exc())
class NormalKeyParser(CommandKeyParser):
"""KeyParser for normal mode with added STARTCHARS detection and more.
Attributes:
_partial_timer: Timer to clear partial keypresses.
"""
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=True)
self._read_config('normal')
self._partial_timer = usertypes.Timer(self, 'partial-match')
self._partial_timer.setSingleShot(True)
self._partial_timer.timeout.connect(self._clear_partial_match)
self._inhibited = False
self._inhibited_timer = usertypes.Timer(self, 'normal-inhibited')
self._inhibited_timer.setSingleShot(True)
def __repr__(self):
return utils.get_repr(self)
def handle(self, e, *, dry_run=False):
"""Override to abort if the key is a startchar.
Args:
e: the KeyPressEvent from Qt.
dry_run: Don't actually execute anything, only check whether there
would be a match.
Return:
A self.Match member.
"""
txt = e.text().strip()
if self._inhibited:
self._debug_log("Ignoring key '{}', because the normal mode is "
"currently inhibited.".format(txt))
return QKeySequence.NoMatch
match = super().handle(e, dry_run=dry_run)
if match == QKeySequence.PartialMatch and not dry_run:
timeout = config.val.input.partial_timeout
if timeout != 0:
self._partial_timer.setInterval(timeout)
self._partial_timer.start()
return match
def set_inhibited_timeout(self, timeout):
"""Ignore keypresses for the given duration."""
if timeout != 0:
self._debug_log("Inhibiting the normal mode for {}ms.".format(
timeout))
self._inhibited = True
self._inhibited_timer.setInterval(timeout)
self._inhibited_timer.timeout.connect(self._clear_inhibited)
self._inhibited_timer.start()
@pyqtSlot()
def _clear_partial_match(self):
"""Clear a partial keystring after a timeout."""
self._debug_log("Clearing partial keystring {}".format(
self._sequence))
self._sequence = keyutils.KeySequence()
self.keystring_updated.emit(str(self._sequence))
@pyqtSlot()
def _clear_inhibited(self):
"""Reset inhibition state after a timeout."""
self._debug_log("Releasing inhibition state of normal mode.")
self._inhibited = False
class PassthroughKeyParser(CommandKeyParser):
"""KeyChainParser which passes through normal keys.
Used for insert/passthrough modes.
Attributes:
_mode: The mode this keyparser is for.
"""
do_log = False
passthrough = True
def __init__(self, win_id, mode, parent=None):
"""Constructor.
Args:
mode: The mode this keyparser is for.
parent: Qt parent.
"""
super().__init__(win_id, parent)
self._read_config(mode)
self._mode = mode
def __repr__(self):
return utils.get_repr(self, mode=self._mode)
class PromptKeyParser(CommandKeyParser):
"""KeyParser for yes/no prompts."""
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=False)
self._read_config('yesno')
def __repr__(self):
return utils.get_repr(self)
class HintKeyParser(CommandKeyParser):
"""KeyChainParser for hints.
Attributes:
_filtertext: The text to filter with.
_last_press: The nature of the last keypress, a LastPress member.
"""
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=False)
self._filtertext = ''
self._last_press = LastPress.none
self._read_config('hint')
self.keystring_updated.connect(self.on_keystring_updated)
def _handle_filter_key(self, e):
"""Handle keys for string filtering.
Return True if the keypress has been handled, and False if not.
Args:
e: the KeyPressEvent from Qt.
Return:
A QKeySequence match.
"""
log.keyboard.debug("Got filter key 0x{:x} text {}".format(
e.key(), e.text()))
hintmanager = objreg.get('hintmanager', scope='tab',
window=self._win_id, tab='current')
if e.key() == Qt.Key_Backspace:
log.keyboard.debug("Got backspace, mode {}, filtertext '{}', "
"sequence '{}'".format(self._last_press,
self._filtertext,
self._sequence))
if self._last_press == LastPress.filtertext and self._filtertext:
self._filtertext = self._filtertext[:-1]
hintmanager.filter_hints(self._filtertext)
return QKeySequence.ExactMatch
elif self._last_press == LastPress.keystring and self._sequence:
self._sequence = self._sequence[:-1]
self.keystring_updated.emit(str(self._sequence))
if not self._sequence and self._filtertext:
# Switch back to hint filtering mode (this can happen only
# in numeric mode after the number has been deleted).
hintmanager.filter_hints(self._filtertext)
self._last_press = LastPress.filtertext
return QKeySequence.ExactMatch
else:
return QKeySequence.NoMatch
elif hintmanager.current_mode() != 'number':
return QKeySequence.NoMatch
elif not e.text():
return QKeySequence.NoMatch
else:
self._filtertext += e.text()
hintmanager.filter_hints(self._filtertext)
self._last_press = LastPress.filtertext
return QKeySequence.ExactMatch
def handle(self, e, *, dry_run=False):
"""Handle a new keypress and call the respective handlers.
Args:
e: the KeyPressEvent from Qt
dry_run: Don't actually execute anything, only check whether there
would be a match.
Returns:
True if the match has been handled, False otherwise.
"""
if dry_run:
return super().handle(e, dry_run=True)
if keyutils.is_special(e.key(), e.modifiers()):
log.keyboard.debug("Got special key, clearing keychain")
self.clear_keystring()
assert not dry_run
match = super().handle(e)
if match == QKeySequence.PartialMatch:
self._last_press = LastPress.keystring
elif match == QKeySequence.ExactMatch:
self._last_press = LastPress.none
elif match == QKeySequence.NoMatch:
            # We couldn't find a keychain, so try to handle the key as filter text.
return self._handle_filter_key(e)
else:
raise ValueError("Got invalid match type {}!".format(match))
return match
def update_bindings(self, strings, preserve_filter=False):
"""Update bindings when the hint strings changed.
Args:
strings: A list of hint strings.
preserve_filter: Whether to keep the current value of
`self._filtertext`.
"""
self._read_config()
self.bindings.update({keyutils.KeySequence.parse(s):
'follow-hint -s ' + s for s in strings})
if not preserve_filter:
self._filtertext = ''
@pyqtSlot(str)
def on_keystring_updated(self, keystr):
"""Update hintmanager when the keystring was updated."""
hintmanager = objreg.get('hintmanager', scope='tab',
window=self._win_id, tab='current')
hintmanager.handle_partial_key(keystr)
class CaretKeyParser(CommandKeyParser):
"""KeyParser for caret mode."""
passthrough = True
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent, supports_count=True)
self._read_config('caret')
class RegisterKeyParser(CommandKeyParser):
"""KeyParser for modes that record a register key.
Attributes:
_mode: One of KeyMode.set_mark, KeyMode.jump_mark, KeyMode.record_macro
and KeyMode.run_macro.
"""
def __init__(self, win_id, mode, parent=None):
super().__init__(win_id, parent, supports_count=False)
self._mode = mode
self._read_config('register')
def handle(self, e, *, dry_run=False):
"""Override handle to always match the next key and use the register.
Args:
e: the KeyPressEvent from Qt.
dry_run: Don't actually execute anything, only check whether there
would be a match.
Return:
True if event has been handled, False otherwise.
"""
match = super().handle(e, dry_run=dry_run)
if match or dry_run:
return match
if keyutils.is_special(e.key(), e.modifiers()):
# this is not a proper register key, let it pass and keep going
return QKeySequence.NoMatch
key = e.text()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
macro_recorder = objreg.get('macro-recorder')
try:
if self._mode == usertypes.KeyMode.set_mark:
tabbed_browser.set_mark(key)
elif self._mode == usertypes.KeyMode.jump_mark:
tabbed_browser.jump_mark(key)
elif self._mode == usertypes.KeyMode.record_macro:
macro_recorder.record_macro(key)
elif self._mode == usertypes.KeyMode.run_macro:
macro_recorder.run_macro(self._win_id, key)
else:
raise ValueError(
"{} is not a valid register mode".format(self._mode))
except cmdexc.Error as err:
message.error(str(err), stack=traceback.format_exc())
self.request_leave.emit(self._mode, "valid register key", True)
return QKeySequence.ExactMatch
|
gpl-3.0
|
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/django/conf/locale/hu/formats.py
|
504
|
1117
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = 'Y. F j. G.i'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y.%m.%d.', # '2006.10.25.'
]
TIME_INPUT_FORMATS = [
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
]
DATETIME_INPUT_FORMATS = [
'%Y.%m.%d. %H.%M.%S', # '2006.10.25. 14.30.59'
'%Y.%m.%d. %H.%M.%S.%f', # '2006.10.25. 14.30.59.000200'
'%Y.%m.%d. %H.%M', # '2006.10.25. 14.30'
'%Y.%m.%d.', # '2006.10.25.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' ' # Non-breaking space
NUMBER_GROUPING = 3
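# Illustrative only (not part of Django): the *_INPUT_FORMATS entries above are
# plain strptime patterns, so the first date input format accepts e.g.
#
#   >>> from datetime import datetime
#   >>> datetime.strptime('2006.10.25.', '%Y.%m.%d.')
#   datetime.datetime(2006, 10, 25, 0, 0)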
|
bsd-3-clause
|
luizbafilho/fusis
|
vendor/github.com/osrg/gobgp/test/scenario_test/bgp_router_test.py
|
5
|
16341
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import sys
import time
import unittest
from fabric.api import local
import nose
from lib.noseplugin import OptionParser, parser_option
from lib import base
from lib.base import (
BGP_FSM_IDLE,
BGP_FSM_ACTIVE,
BGP_FSM_ESTABLISHED,
BGP_ATTR_TYPE_MULTI_EXIT_DISC,
BGP_ATTR_TYPE_LOCAL_PREF,
wait_for_completion,
)
from lib.gobgp import (
GoBGPContainer,
extract_path_attribute,
)
from lib.quagga import QuaggaBGPContainer
from lib.exabgp import ExaBGPContainer
class GoBGPTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
q1 = QuaggaBGPContainer(name='q1', asn=65001, router_id='192.168.0.2')
q2 = QuaggaBGPContainer(name='q2', asn=65002, router_id='192.168.0.3')
q3 = QuaggaBGPContainer(name='q3', asn=65003, router_id='192.168.0.4')
qs = [q1, q2, q3]
ctns = [g1, q1, q2, q3]
# advertise a route from q1, q2, q3
for idx, q in enumerate(qs):
route = '10.0.{0}.0/24'.format(idx + 1)
q.add_route(route)
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
for q in qs:
g1.add_peer(q, reload_config=False, passwd='passwd')
q.add_peer(g1, passwd='passwd', passive=True)
g1.create_config()
g1.reload_config()
cls.gobgp = g1
cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3}
# test each neighbor state is turned establish
def test_01_neighbor_established(self):
for q in self.quaggas.itervalues():
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
def test_02_check_gobgp_global_rib(self):
for q in self.quaggas.itervalues():
# paths expected to exist in gobgp's global rib
routes = q.routes.keys()
timeout = 120
interval = 1
count = 0
while True:
# gobgp's global rib
state = self.gobgp.get_neighbor_state(q)
self.assertEqual(state, BGP_FSM_ESTABLISHED)
global_rib = [p['prefix'] for p in self.gobgp.get_global_rib()]
for p in global_rib:
if p in routes:
routes.remove(p)
if len(routes) == 0:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
    # check gobgp properly adds its own asn to aspath
def test_03_check_gobgp_adj_out_rib(self):
for q in self.quaggas.itervalues():
for path in self.gobgp.get_adj_rib_out(q):
asns = path['aspath']
self.assertTrue(self.gobgp.asn in asns)
    # check routes are properly advertised to all BGP speakers
def test_04_check_quagga_global_rib(self):
interval = 1
timeout = int(120 / interval)
for q in self.quaggas.itervalues():
done = False
for _ in range(timeout):
if done:
break
global_rib = q.get_global_rib()
global_rib = [p['prefix'] for p in global_rib]
if len(global_rib) < len(self.quaggas):
time.sleep(interval)
continue
self.assertTrue(len(global_rib) == len(self.quaggas))
for c in self.quaggas.itervalues():
for r in c.routes:
self.assertTrue(r in global_rib)
done = True
if done:
continue
# should not reach here
raise AssertionError
def test_05_add_quagga(self):
q4 = QuaggaBGPContainer(name='q4', asn=65004, router_id='192.168.0.5')
self.quaggas['q4'] = q4
q4.add_route('10.0.4.0/24')
initial_wait_time = q4.run()
time.sleep(initial_wait_time)
self.gobgp.add_peer(q4)
q4.add_peer(self.gobgp)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q4)
def test_06_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_07_stop_one_quagga(self):
g1 = self.gobgp
q4 = self.quaggas['q4']
q4.stop()
self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q4)
g1.del_peer(q4)
del self.quaggas['q4']
    # check gobgp properly sends a withdrawal message for q4's route
def test_08_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_09_add_distant_relative(self):
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
q3 = self.quaggas['q3']
q5 = QuaggaBGPContainer(name='q5', asn=65005, router_id='192.168.0.6')
initial_wait_time = q5.run()
time.sleep(initial_wait_time)
for q in [q2, q3]:
q5.add_peer(q)
q.add_peer(q5)
med200 = {'name': 'med200',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 200}
q2.add_policy(med200, self.gobgp, 'out')
med100 = {'name': 'med100',
'type': 'permit',
'match': '0.0.0.0/0',
'med': 100}
q3.add_policy(med100, self.gobgp, 'out')
q5.add_route('10.0.6.0/24')
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q2)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q3)
q2.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
q3.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
timeout = 120
interval = 1
count = 0
while True:
paths = self.gobgp.get_adj_rib_out(q1, '10.0.6.0/24')
if len(paths) > 0:
path = paths[0]
print "{0}'s nexthop is {1}".format(path['nlri']['prefix'],
path['nexthop'])
n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
if path['nexthop'] in n_addrs:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
def test_10_originate_path(self):
self.gobgp.add_route('10.10.0.0/24')
dst = self.gobgp.get_global_rib('10.10.0.0/24')
self.assertTrue(len(dst) == 1)
self.assertTrue(len(dst[0]['paths']) == 1)
path = dst[0]['paths'][0]
self.assertTrue(path['nexthop'] == '0.0.0.0')
self.assertTrue(len(path['aspath']) == 0)
def test_11_check_adj_rib_out(self):
for q in self.quaggas.itervalues():
paths = self.gobgp.get_adj_rib_out(q, '10.10.0.0/24')
self.assertTrue(len(paths) == 1)
path = paths[0]
peer_info = self.gobgp.peers[q]
local_addr = peer_info['local_addr'].split('/')[0]
self.assertTrue(path['nexthop'] == local_addr)
self.assertTrue(path['aspath'] == [self.gobgp.asn])
def test_12_disable_peer(self):
q1 = self.quaggas['q1']
self.gobgp.disable_peer(q1)
self.gobgp.wait_for(expected_state=BGP_FSM_IDLE, peer=q1)
time.sleep(3)
for route in q1.routes.iterkeys():
dst = self.gobgp.get_global_rib(route)
self.assertTrue(len(dst) == 0)
for q in self.quaggas.itervalues():
if q is q1:
continue
paths = self.gobgp.get_adj_rib_out(q, route)
self.assertTrue(len(paths) == 0)
def test_13_enable_peer(self):
q1 = self.quaggas['q1']
self.gobgp.enable_peer(q1)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q1)
def test_14_check_adj_rib_out(self):
self.test_11_check_adj_rib_out()
def test_15_check_active_connection(self):
g1 = self.gobgp
g2 = GoBGPContainer(name='g2', asn=65000, router_id='192.168.0.7',
ctn_image_name=self.gobgp.image,
log_level=parser_option.gobgp_log_level)
time.sleep(g2.run())
self.quaggas['g2'] = g2
g2.add_peer(g1, passive=True)
g1.add_peer(g2)
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g2)
def test_16_check_local_pref_and_med_handling(self):
g1 = self.gobgp
g1.add_route('10.20.0.0/24', local_pref=1000, med=2000)
# iBGP peer
g2 = self.quaggas['g2']
paths = g2.get_global_rib('10.20.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(len(paths[0]['paths']) == 1)
path = paths[0]['paths'][0]
local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
self.assertTrue(local_pref['value'] == 1000)
med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
self.assertTrue(med['metric'] == 2000)
# eBGP peer
q1 = self.quaggas['q1']
paths = q1.get_global_rib('10.20.0.0/24')
self.assertTrue(len(paths) == 1)
path = paths[0]
local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
# local_pref's default value is 100
self.assertTrue(local_pref['value'] == 100)
med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
self.assertTrue(med['metric'] == 2000)
def test_17_check_shutdown(self):
g1 = self.gobgp
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
q3 = self.quaggas['q3']
q2.add_route('20.0.0.0/24')
q3.add_route('20.0.0.0/24')
self.test_01_neighbor_established()
self.test_02_check_gobgp_global_rib()
paths = q1.get_global_rib('20.0.0.0/24')
self.assertTrue(len(paths) == 1)
n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
self.assertTrue(paths[0]['nexthop'] in n_addrs)
q3.stop()
time.sleep(3)
paths = q1.get_global_rib('20.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(paths[0]['nexthop'] in n_addrs)
g1.del_peer(q3)
del self.quaggas['q3']
def test_18_check_withdrawal(self):
g1 = self.gobgp
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
g1.add_route('30.0.0.0/24')
q1.add_route('30.0.0.0/24')
self.test_01_neighbor_established()
self.test_02_check_gobgp_global_rib()
paths = g1.get_adj_rib_out(q1, '30.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue('source-id' not in paths[0])
paths = g1.get_adj_rib_out(q2, '30.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue('source-id' not in paths[0])
g1.local('gobgp global rib del 30.0.0.0/24')
paths = g1.get_adj_rib_out(q1, '30.0.0.0/24')
self.assertTrue(len(paths) == 0)
paths = g1.get_adj_rib_out(q2, '30.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(paths[0]['source-id'] == '192.168.0.2')
def test_19_check_grpc_add_neighbor(self):
g1 = self.gobgp
e1 = ExaBGPContainer(name='e1', asn=65000, router_id='192.168.0.7')
time.sleep(e1.run())
e1.add_peer(g1)
self.quaggas['e1'] = e1
n = e1.peers[g1]['local_addr'].split('/')[0]
g1.local('gobgp n add {0} as 65000'.format(n))
g1.add_peer(e1, reload_config=False)
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=e1)
def test_20_check_grpc_del_neighbor(self):
g1 = self.gobgp
e1 = self.quaggas['e1']
n = e1.peers[g1]['local_addr'].split('/')[0]
g1.local('gobgp n del {0}'.format(n))
g1.del_peer(e1, reload_config=False)
def test_21_check_withdrawal_2(self):
g1 = self.gobgp
g2 = self.quaggas['g2']
prefix = '40.10.0.0/24'
g1.add_route(prefix)
wait_for_completion(lambda: len(g1.get_global_rib(prefix)) == 1)
wait_for_completion(lambda: len(g2.get_global_rib(prefix)) == 1)
r = g2.local('gobgp monitor global rib -j', stream=True, tty=False)
g1.local('gobgp global rib del 40.10.0.0/24')
del g1.routes[prefix]
wait_for_completion(lambda: len(g1.get_global_rib(prefix)) == 0)
wait_for_completion(lambda: len(g2.get_global_rib(prefix)) == 0)
ret = json.loads(r.next())
self.assertTrue(ret[0]['nlri']['prefix'] == prefix)
self.assertTrue('withdrawal' in ret[0])
def test_22_check_cli_sorted(self):
g1 = self.gobgp
cnt = 0
def next_prefix():
for i in range(100, 105):
for j in range(100, 105):
yield '{0}.{1}.0.0/24'.format(i, j)
for p in next_prefix():
g1.local('gobgp global rib add {0}'.format(p))
cnt += 1
cnt2 = 0
g = next_prefix()
n = g.next()
for path in g1.local("gobgp global rib", capture=True).split('\n')[1:]:
if [elem for elem in path.split(' ') if elem != ''][1] == n:
try:
cnt2 += 1
n = g.next()
except StopIteration:
break
self.assertTrue(cnt == cnt2)
def test_23_check_withdrawal3(self):
gobgp_ctn_image_name = parser_option.gobgp_image
g1 = self.gobgp
g3 = GoBGPContainer(name='g3', asn=65006, router_id='192.168.0.8',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
g4 = GoBGPContainer(name='g4', asn=65007, router_id='192.168.0.9',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
initial_wait_time = max(ctn.run() for ctn in [g3, g4])
time.sleep(initial_wait_time)
self.quaggas = {'g3': g3, 'g4': g4}
g3.local('gobgp global rib add 50.0.0.0/24')
g1.add_peer(g3, passive=True)
g3.add_peer(g1)
g1.add_peer(g4, passive=True)
g4.add_peer(g1)
self.test_01_neighbor_established()
self.test_02_check_gobgp_global_rib()
g4.local('gobgp global rib add 50.0.0.0/24 med 10')
paths = g1.get_adj_rib_out(g3, '50.0.0.0/24')
self.assertTrue(len(paths) == 0)
paths = g1.get_adj_rib_out(g4, '50.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(paths[0]['source-id'] == '192.168.0.8')
g3.local('gobgp global rib del 50.0.0.0/24')
paths = g1.get_adj_rib_out(g3, '50.0.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(paths[0]['source-id'] == '192.168.0.9')
paths = g1.get_adj_rib_out(g4, '50.0.0.0/24')
self.assertTrue(len(paths) == 0)
if __name__ == '__main__':
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
    if int(output) != 0:
print "docker not found"
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
|
mit
|
phrocker/accumulo
|
test/system/auto/simple/createAndUse.py
|
2
|
1105
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from JavaTest import JavaTest
import unittest
class CreateAndUseTest(JavaTest):
"Test creating and immediately using a table"
order = 21
testClass="org.apache.accumulo.test.functional.CreateAndUseTest"
def suite():
result = unittest.TestSuite()
result.addTest(CreateAndUseTest())
return result
|
apache-2.0
|
eubr-bigsea/tahiti
|
migrations/versions/c0510936c9a5_updating_logistic_regression_solver_.py
|
1
|
2089
|
"""updating logistic regression solver param
Revision ID: c0510936c9a5
Revises: dbb12fc54827
Create Date: 2020-01-08 16:13:56.760866
"""
from alembic import context
from alembic import op
import sqlalchemy as sa
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column, text
import json
# revision identifiers, used by Alembic.
revision = 'c0510936c9a5'
down_revision = 'dbb12fc54827'
branch_labels = None
depends_on = None
SCIKIT_LEARN_PLATAFORM_ID = 4
ID_OPERATION = 4021
all_commands = [
('UPDATE operation_form_field SET operation_form_field.default = "lbfgs" WHERE id=4004 AND form_id=4001',
'UPDATE operation_form_field SET operation_form_field.default = "liblinear" WHERE id=4004 AND form_id=4001')
]
def upgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
connection.execute('SET FOREIGN_KEY_CHECKS=0;')
for cmd in all_commands:
if isinstance(cmd[0], str):
connection.execute(cmd[0])
elif isinstance(cmd[0], list):
for row in cmd[0]:
connection.execute(row)
else:
cmd[0]()
connection.execute('SET FOREIGN_KEY_CHECKS=1;')
except:
session.rollback()
raise
session.commit()
def downgrade():
ctx = context.get_context()
session = sessionmaker(bind=ctx.bind)()
connection = session.connection()
try:
connection.execute('SET FOREIGN_KEY_CHECKS=0;')
for cmd in reversed(all_commands):
if isinstance(cmd[1], str):
connection.execute(cmd[1])
elif isinstance(cmd[1], list):
for row in cmd[1]:
connection.execute(row)
else:
cmd[1]()
connection.execute('SET FOREIGN_KEY_CHECKS=1;')
except:
session.rollback()
raise
session.commit()
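# Illustrative only: each all_commands entry is an (upgrade, downgrade) pair in
# which either element may be a SQL string, a list of SQL strings, or a
# callable; a further change would be appended as e.g. (placeholder SQL)
#
#   all_commands.append((
#       "UPDATE operation_form_field SET ... WHERE id=...",  # run by upgrade()
#       "UPDATE operation_form_field SET ... WHERE id=...",  # run by downgrade()
#   ))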
|
apache-2.0
|
spasovski/zamboni
|
apps/amo/tests/test_messages.py
|
6
|
2247
|
# -*- coding: utf-8 -*-
import django.contrib.messages as django_messages
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
from nose.tools import eq_
from tower import ugettext as _
from amo.messages import _make_message, info
def test_xss():
title = "<script>alert(1)</script>"
message = "<script>alert(2)</script>"
r = _make_message(title)
assert "<script>alert(1)</script>" in r
r = _make_message(None, message)
assert "<script>alert(2)</script>" in r
r = _make_message(title, title_safe=True)
assert "<script>alert(1)</script>" in r
r = _make_message(None, message, message_safe=True)
assert "<script>alert(2)</script>" in r
# Make sure safe flags are independent
r = _make_message(title, message_safe=True)
assert "<script>alert(1)</script>" in r
r = _make_message(None, message, title_safe=True)
assert "<script>alert(2)</script>" in r
def test_no_dupes():
"""Test that duplicate messages aren't saved."""
request = HttpRequest()
setattr(request, '_messages', default_storage(request))
info(request, 'Title', 'Body')
info(request, 'Title', 'Body')
info(request, 'Another Title', 'Another Body')
storage = django_messages.get_messages(request)
eq_(len(storage), 2, 'Too few or too many messages recorded.')
def test_l10n_dups():
"""Test that L10n values are preserved."""
request = HttpRequest()
setattr(request, '_messages', default_storage(request))
info(request, _('Title'), _('Body'))
info(request, _('Title'), _('Body'))
info(request, _('Another Title'), _('Another Body'))
storage = django_messages.get_messages(request)
eq_(len(storage), 2, 'Too few or too many messages recorded.')
def test_unicode_dups():
"""Test that unicode values are preserved."""
request = HttpRequest()
setattr(request, '_messages', default_storage(request))
info(request, u'Titlé', u'Body')
info(request, u'Titlé', u'Body')
info(request, u'Another Titlé', u'Another Body')
storage = django_messages.get_messages(request)
eq_(len(storage), 2, 'Too few or too many messages recorded.')
|
bsd-3-clause
|
grnet/synnefo
|
snf-cyclades-app/synnefo/logic/allocators/base.py
|
2
|
2618
|
# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class AllocatorBase(object):
def filter_backends(self, backends, vm):
"""The `filter_backends` method takes 2 arguments:
1. A list of the available backends. A backend is available
if it is not drained or offline. Each backend is a django object
and is an instance of the `Backend` model.
2. A map with 3 keys:
- `ram`: The size of the memory we want to allocate
on the backend.
- `disk`: The size of the disk we want to allocate
on the backend.
- `cpu`: The size of the CPU we want to allocate
on the backend.
The `Backend` model instances are not locked in the database
so changing their attributes is not advised. The `filter_backends`
method should treat the instances as "Read Only".
"""
raise NotImplementedError(
'The implementation of `filter_backends` is required.'
)
def allocate(self, backends, vm):
"""The `allocate` method takes 2 arguments:
1. A list of the available backends. A backend is available
if it is not drained or offline. Each backend is a django object
and is an instance of the `Backend` model.
2. A map with 3 keys:
- `ram`: The size of the memory we want to allocate
on the backend.
- `disk`: The size of the disk we want to allocate
on the backend.
- `cpu`: The size of the CPU we want to allocate
on the backend.
The `Backend` model instances are now locked in the database.
        Be warned that some attributes of the backends that were passed
        to the `filter_backends` method may have changed, so it is suggested
        that you double-check them.
"""
raise NotImplementedError(
'The implementation of `allocate` is required'
)
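# A minimal, hypothetical subclass sketch (not part of Synnefo; the Backend
# attribute names used here are assumed purely for illustration):
#
#   class FirstFitAllocator(AllocatorBase):
#       def filter_backends(self, backends, vm):
#           # Treat backends as read-only; keep those with enough free RAM/disk.
#           return [b for b in backends
#                   if b.mfree >= vm['ram'] and b.dfree >= vm['disk']]
#
#       def allocate(self, backends, vm):
#           # Backends are locked at this point; simply pick the first candidate.
#           return self.filter_backends(backends, vm)[0]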
|
gpl-3.0
|
gilestrolab/ethoscope
|
scripts/tools/incremental_size_upload.py
|
1
|
2210
|
"""
A tool to incrementally back up one source directory to one destination directory.
A file is copied only if it is larger than the copy already in the destination
(missing destination files count as size zero).
"""
from optparse import OptionParser
import os
import shutil
import glob
import time
def copy_one_file(src, dst):
src_size = os.stat(src).st_size
if os.path.exists(dst):
dst_size = os.stat(dst).st_size
else:
dst_size = 0
if src_size > dst_size:
target_dir = os.path.dirname(dst)
if not os.path.exists(target_dir ):
os.makedirs(target_dir)
if (VERBOSE):
print (src + " =======> " + dst)
shutil.copy2(src, dst)
return 1, src_size
if (VERBOSE):
print("Skipping", src)
return 0, src_size
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-s", "--src-dir", dest="src_dir", help="The source directory to be mirrored")
parser.add_option("-d", "--dst-dir", dest="dst_dir", help="The destination dir to save data to")
parser.add_option("-v", "--verbose", dest="verbose", help="Print progress/info", default=False, action="store_true")
(options, args) = parser.parse_args()
option_dict = vars(options)
LOCAL_RESULTS_ROOT = option_dict["src_dir"]
REMOTE_RESULTS_ROOT = option_dict["dst_dir"]
VERBOSE= option_dict["verbose"]
PATTERN = '*.db'
start_t = time.time()
total = 0.0
processed = 0.0
total_size = 0.0
processed_size = 0.0
for x in sorted(os.walk(LOCAL_RESULTS_ROOT)):
for abs_path in glob.glob(os.path.join(x[0], PATTERN)):
rel_path = os.path.relpath(abs_path, start=LOCAL_RESULTS_ROOT)
target_abs_path = os.path.join(REMOTE_RESULTS_ROOT, rel_path)
pr, size = copy_one_file(abs_path, target_abs_path)
processed += pr
if pr:
processed_size += size
total += 1
total_size += size
delta_t = time.time() - start_t
print("Backup finished. In %i s" % delta_t)
print("%i files processed. %i files in total" % (processed, total))
print("%f GB transferred. %f GB in total" % (processed_size/ 2 **30 , total_size / 2 **30))
|
gpl-3.0
|
jjoaonunes/namebench
|
libnamebench/mocks.py
|
175
|
2503
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocks for tests."""
__author__ = '[email protected] (Thomas Stromberg)'
import time
import nameserver
# external dependencies (from third_party)
import dns.message
import dns.rdataclass
import dns.query
GOOD_IP = '127.0.0.1'
SLOW_IP = '9.9.9.9'
PERFECT_IP = '127.127.127.127'
NO_RESPONSE_IP = '10.0.0.1'
BROKEN_IP = '192.168.0.1'
class MockNameServer(nameserver.NameServer):
"""Act like Nameserver, but do not issue any actual queries!"""
def FakeAnswer(self, request, no_answer=False):
if not request:
request = self.CreateRequest('www.com', 'A', dns.rdataclass.IN)
response_text = """id 999
opcode QUERY
rcode NOERROR
flags QR RD RA
;QUESTION
www.paypal.com. IN A
;ANSWER
www.paypal.com. 159 IN A 66.211.169.65
www.paypal.com. 159 IN A 66.211.169.2
;AUTHORITY
paypal.com. 3459 IN NS ppns1.den.paypal.com.
paypal.com. 3459 IN NS ppns1.phx.paypal.com.
paypal.com. 3459 IN NS ppns2.den.paypal.com.
paypal.com. 3459 IN NS ppns2.phx.paypal.com.
;ADDITIONAL
ppns1.den.paypal.com. 165480 IN A 216.113.188.121
ppns1.phx.paypal.com. 73170 IN A 66.211.168.226
ppns2.den.paypal.com. 73170 IN A 216.113.188.122
ppns2.phx.paypal.com. 73170 IN A 66.211.168.227"""
msg = dns.message.from_text(response_text)
msg.question = request.question
if no_answer:
msg.answer = None
return msg
def Query(self, request, timeout):
"""Return a falsified DNS response."""
question = str(request.question[0])
if self.ip == BROKEN_IP:
raise dns.query.BadResponse('This sucks.')
if self.ip == NO_RESPONSE_IP:
answer = self.FakeAnswer(request, no_answer=True)
elif self.ip == GOOD_IP and 'www.google.com' in question:
answer = self.FakeAnswer(request, no_answer=True)
else:
answer = self.FakeAnswer(request)
if self.ip == GOOD_IP:
time.sleep(0.001)
elif self.ip == SLOW_IP:
time.sleep(0.03)
return answer
|
apache-2.0
|
jerryge/zulip
|
zerver/lib/db.py
|
120
|
1569
|
from __future__ import absolute_import
import time
from psycopg2.extensions import cursor, connection
# Similar to the tracking done in Django's CursorDebugWrapper, but done at the
# psycopg2 cursor level so it works with SQLAlchemy.
def wrapper_execute(self, action, sql, params=()):
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
self.connection.queries.append({
'time': "%.3f" % duration,
})
class TimeTrackingCursor(cursor):
"""A psycopg2 cursor class that tracks the time spent executing queries."""
def execute(self, query, vars=None):
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, query, vars)
def executemany(self, query, vars):
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, query, vars)
class TimeTrackingConnection(connection):
"""A psycopg2 connection class that uses TimeTrackingCursors."""
def __init__(self, *args, **kwargs):
self.queries = []
super(TimeTrackingConnection, self).__init__(*args, **kwargs)
def cursor(self, name=None):
if name is None:
return super(TimeTrackingConnection, self).cursor(cursor_factory=TimeTrackingCursor)
else:
return super(TimeTrackingConnection, self).cursor(name, cursor_factory=TimeTrackingCursor)
def reset_queries():
from django.db import connections
for conn in connections.all():
conn.connection.queries = []
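# Illustrative only (standalone psycopg2 usage outside Django/SQLAlchemy; the
# DSN and the timing value shown are hypothetical):
#
#   import psycopg2
#   conn = psycopg2.connect('dbname=test',
#                           connection_factory=TimeTrackingConnection)
#   cur = conn.cursor()
#   cur.execute('SELECT 1')
#   print(conn.queries)   # e.g. [{'time': '0.001'}]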
|
apache-2.0
|
kazemakase/scikit-learn
|
sklearn/tree/export.py
|
75
|
15670
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
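# Illustrative only (not part of scikit-learn): hues start at 25 degrees, so
# for a single class _color_brew(1) works out to roughly [[229, 129, 57]].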
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
|
bsd-3-clause
|
zaina/nova
|
nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py
|
18
|
22740
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from webob import exc
from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
from nova.api.openstack.compute.plugins.v3 import hypervisors \
as hypervisors_v21
from nova.api.openstack import extensions
from nova.cells import utils as cells_utils
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
TEST_HYPERS = [
dict(id=1,
service_id=1,
host="compute1",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip=netaddr.IPAddress('1.1.1.1')),
dict(id=2,
service_id=2,
host="compute2",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
host_ip=netaddr.IPAddress('2.2.2.2'))]
TEST_SERVICES = [
objects.Service(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
objects.Service(id=2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
disabled_reason=None,
availability_zone="nova"),
]
TEST_HYPERS_OBJ = [objects.ComputeNode(**hyper_dct)
for hyper_dct in TEST_HYPERS]
TEST_HYPERS[0].update({'service': TEST_SERVICES[0]})
TEST_HYPERS[1].update({'service': TEST_SERVICES[1]})
TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
dict(name="inst2", uuid="uuid2", host="compute2"),
dict(name="inst3", uuid="uuid3", host="compute1"),
dict(name="inst4", uuid="uuid4", host="compute2")]
def fake_compute_node_get_all(context):
return TEST_HYPERS_OBJ
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
return TEST_HYPERS_OBJ
def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS_OBJ:
if hyper.id == int(compute_id):
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
def fake_service_get_by_compute_host(context, host):
for service in TEST_SERVICES:
if service.host == host:
return service
def fake_compute_node_statistics(context):
result = dict(
count=0,
vcpus=0,
memory_mb=0,
local_gb=0,
vcpus_used=0,
memory_mb_used=0,
local_gb_used=0,
free_ram_mb=0,
free_disk_gb=0,
current_workload=0,
running_vms=0,
disk_available_least=0,
)
for hyper in TEST_HYPERS_OBJ:
for key in result:
if key == 'count':
result[key] += 1
else:
result[key] += hyper[key]
return result
def fake_instance_get_all_by_host(context, host):
results = []
for inst in TEST_SERVERS:
if inst['host'] == host:
inst_obj = fake_instance.fake_instance_obj(context, **inst)
results.append(inst_obj)
return results
class HypervisorsTestV21(test.NoDBTestCase):
# copying the objects locally so the cells testcases can provide their own
TEST_HYPERS_OBJ = copy.deepcopy(TEST_HYPERS_OBJ)
TEST_SERVICES = copy.deepcopy(TEST_SERVICES)
TEST_SERVERS = copy.deepcopy(TEST_SERVERS)
DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
del DETAIL_HYPERS_DICTS[0]['service_id']
del DETAIL_HYPERS_DICTS[1]['service_id']
del DETAIL_HYPERS_DICTS[0]['host']
del DETAIL_HYPERS_DICTS[1]['host']
DETAIL_HYPERS_DICTS[0].update({'state': 'up',
'status': 'enabled',
'service': dict(id=1, host='compute1',
disabled_reason=None)})
DETAIL_HYPERS_DICTS[1].update({'state': 'up',
'status': 'enabled',
'service': dict(id=2, host='compute2',
disabled_reason=None)})
INDEX_HYPER_DICTS = [
dict(id=1, hypervisor_hostname="hyper1",
state='up', status='enabled'),
dict(id=2, hypervisor_hostname="hyper2",
state='up', status='enabled')]
def _get_request(self, use_admin_context):
return fakes.HTTPRequest.blank('', use_admin_context=use_admin_context)
def _set_up_controller(self):
self.controller = hypervisors_v21.HypervisorsController()
self.controller.servicegroup_api.service_is_up = mock.MagicMock(
return_value=True)
def setUp(self):
super(HypervisorsTestV21, self).setUp()
self._set_up_controller()
self.rule_hyp_show = "os_compute_api:os-hypervisors"
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
fake_compute_node_get_all)
self.stubs.Set(self.controller.host_api, 'service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor)
self.stubs.Set(self.controller.host_api, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(db, 'compute_node_statistics',
fake_compute_node_statistics)
def test_view_hypervisor_nodetail_noservers(self):
result = self.controller._view_hypervisor(
self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], False)
self.assertEqual(result, self.INDEX_HYPER_DICTS[0])
def test_view_hypervisor_detail_noservers(self):
result = self.controller._view_hypervisor(
self.TEST_HYPERS_OBJ[0], self.TEST_SERVICES[0], True)
self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
def test_view_hypervisor_servers(self):
result = self.controller._view_hypervisor(self.TEST_HYPERS_OBJ[0],
self.TEST_SERVICES[0],
False, self.TEST_SERVERS)
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'servers': [
dict(name="inst1", uuid="uuid1"),
dict(name="inst2", uuid="uuid2"),
dict(name="inst3", uuid="uuid3"),
dict(name="inst4", uuid="uuid4")]})
self.assertEqual(result, expected_dict)
def test_index(self):
req = self._get_request(True)
result = self.controller.index(req)
self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
def test_index_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_detail(self):
req = self._get_request(True)
result = self.controller.detail(req)
self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
def test_detail_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.detail, req)
def test_show_noid(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_show_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
def test_show_withid(self):
req = self._get_request(True)
result = self.controller.show(req, self.TEST_HYPERS_OBJ[0].id)
self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
def test_show_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req,
self.TEST_HYPERS_OBJ[0].id)
def test_uptime_noid(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3')
def test_uptime_notimplemented(self):
def fake_get_host_uptime(context, hyp):
raise exc.HTTPNotImplemented()
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotImplemented,
self.controller.uptime, req,
self.TEST_HYPERS_OBJ[0].id)
def test_uptime_implemented(self):
def fake_get_host_uptime(context, hyp):
return "fake uptime"
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = self._get_request(True)
result = self.controller.uptime(req, self.TEST_HYPERS_OBJ[0].id)
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
expected_dict.update({'uptime': "fake uptime"})
self.assertEqual(result, dict(hypervisor=expected_dict))
def test_uptime_non_integer_id(self):
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
def test_uptime_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.uptime, req,
self.TEST_HYPERS_OBJ[0].id)
def test_search(self):
req = self._get_request(True)
result = self.controller.search(req, 'hyper')
self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
def test_search_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.search, req,
self.TEST_HYPERS_OBJ[0].id)
def test_search_non_exist(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
@mock.patch.object(objects.InstanceList, 'get_by_host',
side_effect=fake_instance_get_all_by_host)
def test_servers(self, mock_get):
req = self._get_request(True)
result = self.controller.servers(req, 'hyper')
expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
expected_dict[0].update({'servers': [
dict(uuid="uuid1"),
dict(uuid="uuid3")]})
expected_dict[1].update({'servers': [
dict(uuid="uuid2"),
dict(uuid="uuid4")]})
for output in result['hypervisors']:
servers = output['servers']
for server in servers:
del server['name']
self.assertEqual(result, dict(hypervisors=expected_dict))
def test_servers_non_id(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers,
req, '115')
def test_servers_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.servers, req,
self.TEST_HYPERS_OBJ[0].id)
def test_servers_with_non_integer_hypervisor_id(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = self._get_request(True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers, req, 'abc')
def test_servers_with_no_server(self):
def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
return []
self.stubs.Set(self.controller.host_api, 'instance_get_all_by_host',
fake_instance_get_all_by_host_return_empty)
req = self._get_request(True)
result = self.controller.servers(req, self.TEST_HYPERS_OBJ[0].id)
self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
def test_statistics(self):
req = self._get_request(True)
result = self.controller.statistics(req)
self.assertEqual(result, dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200)))
def test_statistics_non_admin(self):
req = self._get_request(False)
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.statistics, req)
class HypervisorsTestV2(HypervisorsTestV21):
DETAIL_HYPERS_DICTS = copy.deepcopy(
HypervisorsTestV21.DETAIL_HYPERS_DICTS)
del DETAIL_HYPERS_DICTS[0]['state']
del DETAIL_HYPERS_DICTS[1]['state']
del DETAIL_HYPERS_DICTS[0]['status']
del DETAIL_HYPERS_DICTS[1]['status']
del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason']
del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason']
del DETAIL_HYPERS_DICTS[0]['host_ip']
del DETAIL_HYPERS_DICTS[1]['host_ip']
INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
del INDEX_HYPER_DICTS[0]['state']
del INDEX_HYPER_DICTS[1]['state']
del INDEX_HYPER_DICTS[0]['status']
del INDEX_HYPER_DICTS[1]['status']
def setUp(self):
super(HypervisorsTestV2, self).setUp()
self.rule_hyp_show = "compute_extension:hypervisors"
self.rule = {self.rule_hyp_show: ""}
def _set_up_controller(self):
self.context = context.get_admin_context()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
def test_index_non_admin_back_compatible_db(self):
self.policy.set_rules(self.rule)
req = self._get_request(False)
self.assertRaises(exception.AdminRequired,
self.controller.index, req)
def test_detail_non_admin_back_compatible_db(self):
self.policy.set_rules(self.rule)
req = self._get_request(False)
self.assertRaises(exception.AdminRequired,
self.controller.detail, req)
def test_search_non_admin_back_compatible_db(self):
self.policy.set_rules(self.rule)
req = self._get_request(False)
self.assertRaises(exception.AdminRequired,
self.controller.search, req,
self.TEST_HYPERS_OBJ[0].id)
def test_servers_non_admin_back_compatible_db(self):
self.policy.set_rules(self.rule)
req = self._get_request(False)
self.assertRaises(exception.AdminRequired,
self.controller.servers, req,
self.TEST_HYPERS_OBJ[0].id)
class CellHypervisorsTestV21(HypervisorsTestV21):
cell_path = 'cell1'
TEST_HYPERS_OBJ = [cells_utils.ComputeNodeProxy(obj, cell_path)
for obj in TEST_HYPERS_OBJ]
TEST_SERVICES = [cells_utils.ServiceProxy(obj, cell_path)
for obj in TEST_SERVICES]
TEST_SERVERS = [dict(server,
host=cells_utils.cell_with_item(cell_path,
server['host']))
for server in TEST_SERVERS]
DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV21.DETAIL_HYPERS_DICTS)
DETAIL_HYPERS_DICTS = [dict(hyp, id=cells_utils.cell_with_item(cell_path,
hyp['id']),
service=dict(hyp['service'],
id=cells_utils.cell_with_item(
cell_path,
hyp['service']['id']),
host=cells_utils.cell_with_item(
cell_path,
hyp['service']['host'])))
for hyp in DETAIL_HYPERS_DICTS]
INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
INDEX_HYPER_DICTS = [dict(hyp, id=cells_utils.cell_with_item(cell_path,
hyp['id']))
for hyp in INDEX_HYPER_DICTS]
@classmethod
def fake_compute_node_get_all(cls, context):
return cls.TEST_HYPERS_OBJ
@classmethod
def fake_compute_node_search_by_hypervisor(cls, context, hypervisor_re):
return cls.TEST_HYPERS_OBJ
@classmethod
def fake_compute_node_get(cls, context, compute_id):
for hyper in cls.TEST_HYPERS_OBJ:
if hyper.id == compute_id:
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
@classmethod
def fake_service_get_by_compute_host(cls, context, host):
for service in cls.TEST_SERVICES:
if service.host == host:
return service
@classmethod
def fake_instance_get_all_by_host(cls, context, host):
results = []
for inst in cls.TEST_SERVERS:
if inst['host'] == host:
results.append(inst)
return results
def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(CellHypervisorsTestV21, self).setUp()
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
self.fake_compute_node_get_all)
self.stubs.Set(self.controller.host_api, 'service_get_by_compute_host',
self.fake_service_get_by_compute_host)
self.stubs.Set(self.controller.host_api,
'compute_node_search_by_hypervisor',
self.fake_compute_node_search_by_hypervisor)
self.stubs.Set(self.controller.host_api, 'compute_node_get',
self.fake_compute_node_get)
self.stubs.Set(self.controller.host_api, 'compute_node_statistics',
fake_compute_node_statistics)
self.stubs.Set(self.controller.host_api, 'instance_get_all_by_host',
self.fake_instance_get_all_by_host)
class CellHypervisorsTestV2(HypervisorsTestV2, CellHypervisorsTestV21):
cell_path = 'cell1'
DETAIL_HYPERS_DICTS = copy.deepcopy(HypervisorsTestV2.DETAIL_HYPERS_DICTS)
DETAIL_HYPERS_DICTS = [dict(hyp, id=cells_utils.cell_with_item(cell_path,
hyp['id']),
service=dict(hyp['service'],
id=cells_utils.cell_with_item(
cell_path,
hyp['service']['id']),
host=cells_utils.cell_with_item(
cell_path,
hyp['service']['host'])))
for hyp in DETAIL_HYPERS_DICTS]
INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV2.INDEX_HYPER_DICTS)
INDEX_HYPER_DICTS = [dict(hyp, id=cells_utils.cell_with_item(cell_path,
hyp['id']))
for hyp in INDEX_HYPER_DICTS]
def setUp(self):
super(CellHypervisorsTestV2, self).setUp()
|
apache-2.0
|
direvus/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_dnszone_facts.py
|
20
|
5086
|
#!/usr/bin/python
#
# Copyright (c) 2017 Obezimnaka Boms, <[email protected]>
#
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_dnszone_facts
version_added: "2.4"
short_description: Get DNS zone facts.
description:
- Get facts for a specific DNS zone or all DNS zones within a resource group.
options:
resource_group:
description:
- Limit results by resource group. Required when filtering by name.
name:
description:
- Only show results for a specific zone.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Obezimnaka Boms @ozboms"
'''
EXAMPLES = '''
- name: Get facts for one zone
azure_rm_dnszone_facts:
resource_group: Testing
name: foobar22
- name: Get facts for all zones in a resource group
azure_rm_dnszone_facts:
resource_group: Testing
- name: Get facts by tags
azure_rm_dnszone_facts:
tags:
- testing
'''
RETURN = '''
azure_dnszones:
description: List of zone dicts.
returned: always
type: list
example: [{
"etag": "00000002-0000-0000-0dcb-df5776efd201",
"location": "global",
"properties": {
"maxNumberOfRecordSets": 5000,
"numberOfRecordSets": 15
},
"tags": {}
}]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except Exception:
# This is handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'DnsZone'
class AzureRMDNSZoneFacts(AzureRMModuleBase):
def __init__(self):
# define user inputs into argument
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
# store the results of the module operation
self.results = dict(
changed=False,
ansible_facts=dict(azure_dnszones=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMDNSZoneFacts, self).__init__(self.module_arg_spec)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
# list the conditions and what to return based on user input
if self.name is not None:
# if there is a name, facts about that specific zone
self.results['ansible_facts']['azure_dnszones'] = self.get_item()
elif self.resource_group:
# all the zones listed in that specific resource group
self.results['ansible_facts']['azure_dnszones'] = self.list_resource_group()
else:
# all the zones in a subscription
self.results['ansible_facts']['azure_dnszones'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
results = []
# get specific zone
try:
item = self.dns_client.zones.get(self.resource_group, self.name)
except CloudError:
pass
# serialize result
if item and self.has_tags(item.tags, self.tags):
results = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return results
def list_resource_group(self):
self.log('List items for resource group')
try:
response = self.dns_client.zones.list_by_resource_group(self.resource_group)
except AzureHttpError as exc:
self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def list_items(self):
self.log('List all items')
try:
response = self.dns_client.zones.list()
except AzureHttpError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMDNSZoneFacts()
if __name__ == '__main__':
main()
|
gpl-3.0
|
michael-dev2rights/ansible
|
lib/ansible/modules/cloud/google/gcp_healthcheck.py
|
49
|
15554
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_healthcheck
version_added: "2.4"
short_description: Create, Update or Destroy a Healthcheck.
description:
- Create, Update or Destroy a Healthcheck. Currently only HTTP and
HTTPS Healthchecks are supported. Healthchecks are used to monitor
individual instances, managed instance groups and/or backend
      services. Healthchecks are reusable.
- Visit
U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
for an overview of Healthchecks on GCP.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
API details on HTTP Healthchecks.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
      for more details on the HTTPS Healthcheck API.
requirements:
- "python >= 2.6"
- "google-api-python-client >= 1.6.2"
- "google-auth >= 0.9.0"
- "google-auth-httplib2 >= 0.0.2"
notes:
- Only supports HTTP and HTTPS Healthchecks currently.
author:
- "Tom Melendez (@supertom) <[email protected]>"
options:
check_interval:
description:
- How often (in seconds) to send a health check.
required: false
default: 5
healthcheck_name:
description:
- Name of the Healthcheck.
required: true
healthcheck_type:
description:
- Type of Healthcheck.
required: true
choices: ["HTTP", "HTTPS"]
host_header:
description:
- The value of the host header in the health check request. If left
empty, the public IP on behalf of which this health
check is performed will be used.
    required: false
default: ""
port:
description:
- The TCP port number for the health check request. The default value is
443 for HTTPS and 80 for HTTP.
required: false
request_path:
description:
      - The request path of the health check request.
required: false
default: "/"
state:
description: State of the Healthcheck.
required: true
choices: ["present", "absent"]
timeout:
description:
- How long (in seconds) to wait for a response before claiming
failure. It is invalid for timeout
to have a greater value than check_interval.
required: false
default: 5
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this
many consecutive failures.
required: false
default: 2
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this
many consecutive successes.
required: false
default: 2
service_account_email:
description:
- service account email
required: false
default: null
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
credentials_file:
description:
- Path to the JSON file associated with the service account email
default: null
required: false
project_id:
description:
- Your GCP project ID
required: false
default: null
'''
EXAMPLES = '''
- name: Create Minimum HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
state: present
- name: Create HTTP HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
    host_header: my-host
request_path: /hc
check_interval: 10
timeout: 30
    unhealthy_threshold: 2
    healthy_threshold: 1
state: present
- name: Create HTTPS HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: "{{ https_healthcheck }}"
healthcheck_type: HTTPS
host_header: my-host
request_path: /hc
check_interval: 5
timeout: 5
unhealthy_threshold: 2
healthy_threshold: 1
state: present
'''
RETURN = '''
state:
description: state of the Healthcheck
returned: Always.
type: str
sample: present
healthcheck_name:
description: Name of the Healthcheck
returned: Always
type: str
sample: my-url-map
healthcheck_type:
description: Type of the Healthcheck
returned: Always
type: str
sample: HTTP
healthcheck:
description: GCP Healthcheck dictionary
returned: Always. Refer to GCP documentation for detailed field descriptions.
type: dict
sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import get_google_api_client, GCPUtils
USER_AGENT_PRODUCT = 'ansible-healthcheck'
USER_AGENT_VERSION = '0.0.1'
def _validate_healthcheck_params(params):
"""
Validate healthcheck params.
    Simple validation has already been performed by AnsibleModule.
:param params: Ansible dictionary containing configuration.
:type params: ``dict``
    :return: (True, '') on success, or raises ValueError
    :rtype: ``tuple`` or `class:ValueError`
"""
if params['timeout'] > params['check_interval']:
raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
params['timeout'], params['check_interval']))
return (True, '')
def _build_healthcheck_dict(params):
"""
    Reformat healthcheck params from Ansible for the GCP API.
    :param params: Params from AnsibleModule object
    :type params: ``dict``
    :return: dictionary suitable for submission to the GCP
            HealthCheck (HTTP/HTTPS) API.
    :rtype: ``dict``
"""
gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
if 'timeout' in gcp_dict:
gcp_dict['timeoutSec'] = gcp_dict['timeout']
del gcp_dict['timeout']
if 'checkInterval' in gcp_dict:
gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
del gcp_dict['checkInterval']
if 'hostHeader' in gcp_dict:
gcp_dict['host'] = gcp_dict['hostHeader']
del gcp_dict['hostHeader']
if 'healthcheckType' in gcp_dict:
del gcp_dict['healthcheckType']
return gcp_dict
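# Illustrative sketch (not part of the original module): assuming
# GCPUtils.params_to_gcp_dict camel-cases the Ansible keys and uses
# 'healthcheck_name' as the resource name, an input such as
#     {'healthcheck_name': 'my-hc', 'check_interval': 10,
#      'timeout': 5, 'host_header': 'my-host', 'request_path': '/hc'}
# would, after the renames above, become a GCP-style body roughly like
#     {'name': 'my-hc', 'checkIntervalSec': 10, 'timeoutSec': 5,
#      'host': 'my-host', 'requestPath': '/hc'}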
def _get_req_resource(client, resource_type):
if resource_type == 'HTTPS':
return (client.httpsHealthChecks(), 'httpsHealthCheck')
else:
return (client.httpHealthChecks(), 'httpHealthCheck')
def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
"""
Get a Healthcheck from GCP.
    :param client: An initialized GCE Compute Discovery resource.
    :type client: :class: `googleapiclient.discovery.Resource`
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: A dict resp from the respective GCP 'get' request.
:rtype: ``dict``
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.get(**args)
return GCPUtils.execute_api_client_req(req, raise_404=False)
except:
raise
def create_healthcheck(client, params, project_id, resource_type='HTTP'):
"""
Create a new Healthcheck.
    :param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
try:
resource, _ = _get_req_resource(client, resource_type)
args = {'project': project_id, 'body': gcp_dict}
req = resource.insert(**args)
return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
if not return_data:
return_data = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=project_id)
return (True, return_data)
except:
raise
def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
"""
Delete a Healthcheck.
    :param client: An initialized GCE Compute Discovery resource.
    :type client: :class: `googleapiclient.discovery.Resource`
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.delete(**args)
return_data = GCPUtils.execute_api_client_req(req, client)
return (True, return_data)
except:
raise
def update_healthcheck(client, healthcheck, params, name, project_id,
resource_type='HTTP'):
"""
Update a Healthcheck.
If the healthcheck has not changed, the update will not occur.
    :param client: An initialized GCE Compute Discovery resource.
    :type client: :class: `googleapiclient.discovery.Resource`
    :param healthcheck: The existing Healthcheck resource fetched from GCP.
:type healthcheck: ``dict``
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
    :param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
if ans:
return (False, 'no update necessary')
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name, 'body': gcp_dict}
req = resource.update(**args)
return_data = GCPUtils.execute_api_client_req(
req, client=client, raw=False)
return (True, return_data)
except:
raise
def main():
module = AnsibleModule(argument_spec=dict(
healthcheck_name=dict(required=True),
healthcheck_type=dict(required=True,
choices=['HTTP', 'HTTPS']),
request_path=dict(required=False, default='/'),
check_interval=dict(required=False, type='int', default=5),
healthy_threshold=dict(required=False, type='int', default=2),
unhealthy_threshold=dict(required=False, type='int', default=2),
host_header=dict(required=False, type='str', default=''),
timeout=dict(required=False, type='int', default=5),
port=dict(required=False, type='int'),
state=dict(choices=['absent', 'present'], default='present'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
credentials_file=dict(),
project_id=dict(), ), )
client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
user_agent_version=USER_AGENT_VERSION)
params = {}
params['healthcheck_name'] = module.params.get('healthcheck_name')
params['healthcheck_type'] = module.params.get('healthcheck_type')
params['request_path'] = module.params.get('request_path')
params['check_interval'] = module.params.get('check_interval')
params['healthy_threshold'] = module.params.get('healthy_threshold')
params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
params['host_header'] = module.params.get('host_header')
params['timeout'] = module.params.get('timeout')
params['port'] = module.params.get('port', None)
params['state'] = module.params.get('state')
if not params['port']:
params['port'] = 80
if params['healthcheck_type'] == 'HTTPS':
params['port'] = 443
try:
_validate_healthcheck_params(params)
except Exception as e:
module.fail_json(msg=e.message, changed=False)
changed = False
json_output = {'state': params['state']}
healthcheck = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
if not healthcheck:
if params['state'] == 'absent':
# Doesn't exist in GCE, and state==absent.
changed = False
module.fail_json(
msg="Cannot delete unknown healthcheck: %s" %
(params['healthcheck_name']))
else:
# Create
changed, json_output['healthcheck'] = create_healthcheck(client,
params=params,
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
elif params['state'] == 'absent':
# Delete
changed, json_output['healthcheck'] = delete_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
else:
changed, json_output['healthcheck'] = update_healthcheck(client,
healthcheck=healthcheck,
params=params,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
json_output['changed'] = changed
json_output.update(params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
gpl-3.0
|
vmindru/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py
|
14
|
3949
|
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup_facts
version_added: "2.1"
short_description: Get resource group facts.
description:
- Get facts for a specific resource group or all resource groups.
options:
name:
description:
- Limit results to a specific resource group.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one resource group
azure_rm_resourcegroup_facts:
name: Testing
- name: Get facts for all resource groups
azure_rm_resourcegroup_facts:
- name: Get facts by tags
azure_rm_resourcegroup_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_resourcegroups:
description: List of resource group dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"properties": {
"provisioningState": "Succeeded"
},
"tags": {
"delete": "never",
"testing": "testing"
}
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'ResourceGroup'
class AzureRMResourceGroupFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_resourcegroups=[])
)
self.name = None
self.tags = None
super(AzureRMResourceGroupFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name:
self.results['ansible_facts']['azure_resourcegroups'] = self.get_item()
else:
self.results['ansible_facts']['azure_resourcegroups'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.rm_client.resource_groups.get(self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return result
def list_items(self):
self.log('List all items')
try:
response = self.rm_client.resource_groups.list()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMResourceGroupFacts()
if __name__ == '__main__':
main()
|
gpl-3.0
|
sudheesh001/oh-mainline
|
vendor/packages/docutils/docutils/languages/en.py
|
246
|
1848
|
# $Id: en.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
English-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Author',
'authors': 'Authors',
'organization': 'Organization',
'address': 'Address',
'contact': 'Contact',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Date',
'copyright': 'Copyright',
'dedication': 'Dedication',
'abstract': 'Abstract',
'attention': 'Attention!',
'caution': 'Caution!',
'danger': '!DANGER!',
'error': 'Error',
'hint': 'Hint',
'important': 'Important',
'note': 'Note',
'tip': 'Tip',
'warning': 'Warning',
'contents': 'Contents'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'author': 'author',
'authors': 'authors',
'organization': 'organization',
'address': 'address',
'contact': 'contact',
'version': 'version',
'revision': 'revision',
'status': 'status',
'date': 'date',
'copyright': 'copyright',
'dedication': 'dedication',
'abstract': 'abstract'}
"""English (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
|
agpl-3.0
|
bev-a-tron/pledge_service
|
build/mailchimp/requests/packages/chardet/latin1prober.py
|
950
|
5241
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
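# Note: Latin1ClassModel above is a flattened 8x8 transition table; feed()
# below indexes it as Latin1ClassModel[previous_class * CLASS_NUM + current_class]
# to rate how plausible each (previous, current) character-class pair is.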
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] / total)
- (self._mFreqCounter[1] * 20.0 / total))
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.5
return confidence
|
apache-2.0
|
igemsoftware/SYSU-Software2013
|
project/Python27_32/Lib/sqlite3/__init__.py
|
239
|
1037
|
#-*- coding: ISO-8859-1 -*-
# pysqlite2/__init__.py: the pysqlite2 package.
#
# Copyright (C) 2005 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from dbapi2 import *
|
mit
|
javivi001/horus
|
src/horus/engine/board.py
|
2
|
7274
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------#
# #
# This file is part of the Horus Project #
# #
# Copyright (C) 2014-2015 Mundo Reader S.L. #
# #
# Date: August, November 2014 #
# Author: Jesús Arroyo Torrens <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#-----------------------------------------------------------------------#
__author__ = "Jesús Arroyo Torrens <[email protected]>"
__license__ = "GNU General Public License v2 http://www.gnu.org/licenses/gpl.html"
import time
import serial
import threading
class Error(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class WrongFirmware(Error):
def __init__(self, msg="WrongFirmware"):
        super(WrongFirmware, self).__init__(msg)
class BoardNotConnected(Error):
def __init__(self, msg="BoardNotConnected"):
        super(BoardNotConnected, self).__init__(msg)
class Board:
"""Board class. For accessing to the scanner board"""
"""
Gcode commands:
G1 Fnnn : feed rate
G1 Xnnn : move motor
M70 Tn : switch off laser n
M71 Tn : switch on laser n
"""
def __init__(self, parent=None, serialName='/dev/ttyUSB0', baudRate=115200):
self.parent = parent
self.serialName = serialName
self.baudRate = baudRate
self.serialPort = None
self.isConnected = False
self.unplugCallback = None
self._position = 0
self._direction = 1
self._n = 0 # Check if command fails
def setSerialName(self, serialName):
self.serialName = serialName
def setBaudRate(self, baudRate):
self.baudRate = baudRate
def setInvertMotor(self, invertMotor):
if invertMotor:
self._direction = -1
else:
self._direction = +1
def setUnplugCallback(self, unplugCallback=None):
self.unplugCallback = unplugCallback
def connect(self):
""" Opens serial port and performs handshake"""
print ">>> Connecting board {0} {1}".format(self.serialName, self.baudRate)
self.isConnected = False
try:
self.serialPort = serial.Serial(self.serialName, self.baudRate, timeout=2)
if self.serialPort.isOpen():
#-- Force Reset and flush
self._reset()
version = self.serialPort.readline()
if version == "Horus 0.1 ['$' for help]\r\n":
self.setSpeedMotor(1)
self.setAbsolutePosition(0)
self.serialPort.timeout = 0.05
print ">>> Done"
self.isConnected = True
else:
raise WrongFirmware()
else:
raise BoardNotConnected()
except:
print "Error opening the port {0}\n".format(self.serialName)
self.serialPort = None
raise BoardNotConnected()
def disconnect(self):
""" Closes serial port """
if self.isConnected:
print ">>> Disconnecting board {0}".format(self.serialName)
try:
if self.serialPort is not None:
self.setLeftLaserOff()
self.setRightLaserOff()
self.disableMotor()
self.serialPort.close()
del self.serialPort
except serial.SerialException:
print "Error closing the port {0}\n".format(self.serialName)
print ">>> Error"
self.isConnected = False
print ">>> Done"
def enableMotor(self):
return self._sendCommand("M17")
def disableMotor(self):
return self._sendCommand("M18")
def setSpeedMotor(self, feedRate):
self.feedRate = feedRate
return self._sendCommand("G1F{0}".format(self.feedRate))
def setAccelerationMotor(self, acceleration):
self.acceleration = acceleration
return self._sendCommand("$120={0}".format(self.acceleration))
def setRelativePosition(self, pos):
self._posIncrement = pos
def setAbsolutePosition(self, pos):
self._posIncrement = 0
self._position = pos
def moveMotor(self, nonblocking=False, callback=None):
self._position += self._posIncrement * self._direction
return self._sendCommand("G1X{0}".format(self._position), nonblocking, callback)
def setRightLaserOn(self):
return self._sendCommand("M71T2")
def setLeftLaserOn(self):
return self._sendCommand("M71T1")
def setRightLaserOff(self):
return self._sendCommand("M70T2")
def setLeftLaserOff(self):
return self._sendCommand("M70T1")
def getLDRSensor(self, pin):
value = self.sendRequest("M50T"+pin, readLines=True).split("\n")[0]
try:
return int(value)
except ValueError:
return 0
def sendRequest(self, req, nonblocking=False, callback=None, readLines=False):
if nonblocking:
threading.Thread(target=self._sendRequest, args=(req, callback, readLines)).start()
else:
return self._sendRequest(req, callback, readLines)
def _sendRequest(self, req, callback=None, readLines=False):
"""Sends the request and returns the response"""
ret = ''
if self.isConnected and req != '':
if self.serialPort is not None and self.serialPort.isOpen():
try:
self.serialPort.flushInput()
self.serialPort.flushOutput()
self.serialPort.write(req+"\r\n")
while ret == '': # TODO: add timeout
if readLines:
ret = ''.join(self.serialPort.readlines())
else:
ret = ''.join(self.serialPort.readline())
time.sleep(0.01)
self._success()
except:
if callback is not None:
callback(ret)
self._fail()
else:
self._fail()
if callback is not None:
callback(ret)
return ret
def _success(self):
self._n = 0
def _fail(self):
self._n += 1
if self._n >= 1:
self._n = 0
if self.unplugCallback is not None and \
self.parent is not None and not self.parent.unplugged:
self.parent.unplugged = True
self.unplugCallback()
def _checkAcknowledge(self, ack):
if ack is not None:
return ack.endswith("ok\r\n")
else:
return False
def _sendCommand(self, cmd, nonblocking=False, callback=None):
if nonblocking:
self.sendRequest(cmd, nonblocking, callback)
else:
return self._checkAcknowledge(self._sendRequest(cmd))
def _reset(self):
self.serialPort.flushInput()
self.serialPort.flushOutput()
self.serialPort.write("\x18\r\n") # Ctrl-x
self.serialPort.readline()
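# A minimal usage sketch (illustrative only; the serial device name below is an
# assumption, not part of the original module):
#
# board = Board(serialName='/dev/ttyUSB0', baudRate=115200)
# board.connect()              # raises WrongFirmware or BoardNotConnected on failure
# board.setSpeedMotor(200)     # G1 F200
# board.setRelativePosition(90)
# board.moveMotor()            # G1 Xnnn (blocking)
# board.setLeftLaserOn()       # M71 T1
# board.setLeftLaserOff()      # M70 T1
# board.disconnect()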
|
gpl-2.0
|
harish2rb/pyGeoNet
|
test/test_pygeonet_processing.py
|
1
|
4709
|
# pyGeoNet_readGeotiff
#import sys
#import os
from osgeo import gdal
#from string import *
import numpy as np
from time import clock
import pygeonet_defaults as defaults
import pygeonet_prepare as Parameters
from math import modf, floor
#from scipy.stats.mstats import mquantiles
def read_dem_from_geotiff(demFileName,demFilePath):
# Open the GeoTIFF format DEM
fullFilePath = demFilePath + demFileName
#fullFilePath = "G:\\HarishLaptop_Backup\\TI102782W0E\\PythonScripts\\pyGeoNet1.0\\data\\skunk.tif"
print fullFilePath
ary = []
ds = gdal.Open(fullFilePath, gdal.GA_ReadOnly)
geotransform = ds.GetGeoTransform()
'''
print 'Driver: ', ds.GetDriver().ShortName,'/', \
ds.GetDriver().LongName
print 'Size is ',ds.RasterXSize,'x',ds.RasterYSize, \
'x',ds.RasterCount
print 'Projection is ',ds.GetProjection()
if not geotransform is None:
print 'Origin = (',geotransform[0], ',',geotransform[3],')'
print 'Pixel Size = (',geotransform[1], ',',geotransform[5],')'
'''
ary = ds.GetRasterBand(1).ReadAsArray()
#Parameters.geospatialReferenceArray
#Parameters.geoReferencingMatrix
#Parameters.geoBoundingBox
Parameters.demPixelScale = geotransform[1]
Parameters.xLowerLeftCoord = geotransform[0]
Parameters.yLowerLeftCoord = geotransform[3]
return ary
def quantile(x, q, qtype = 7, issorted = False):
"""
Args:
x - input data
q - quantile
qtype - algorithm
issorted- True if x already sorted.
Compute quantiles from input array x given q.For median,
specify q=0.5.
References:
http://reference.wolfram.com/mathematica/ref/Quantile.html
http://wiki.r-project.org/rwiki/doku.php?id=rdoc:stats:quantile
Author:
Ernesto P.Adorio Ph.D.
UP Extension Program in Pampanga, Clark Field.
"""
if not issorted:
y = sorted(x)
else:
y = x
if not (1 <= qtype <= 9):
return None # error!
# Parameters for the Hyndman and Fan algorithm
abcd = [(0, 0, 1, 0), # inverse empirical distrib.function., R type 1
(0.5, 0, 1, 0), # similar to type 1, averaged, R type 2
(0.5, 0, 0, 0), # nearest order statistic,(SAS) R type 3
(0, 0, 0, 1), # California linear interpolation, R type 4
(0.5, 0, 0, 1), # hydrologists method, R type 5
(0, 1, 0, 1), # mean-based estimate(Weibull method), (SPSS,Minitab), type 6
(1, -1, 0, 1), # mode-based method,(S, S-Plus), R type 7
(1.0/3, 1.0/3, 0, 1), # median-unbiased , R type 8
(3/8.0, 0.25, 0, 1) # normal-unbiased, R type 9.
]
a, b, c, d = abcd[qtype-1]
n = len(x)
g, j = modf( a + (n+b) * q -1)
if j < 0:
return y[0]
elif j >= n:
return y[n-1] # oct. 8, 2010 y[n]???!! uncaught off by 1 error!!!
j = int(floor(j))
if g == 0:
return y[j]
else:
return y[j] + (y[j+1]- y[j])* (c + d * g)
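# A worked example of the function above (illustrative only): with the default
# qtype=7 (the mode-based R/S-Plus rule, a=1, b=-1, c=0, d=1) and x=[1, 2, 3, 4],
# q=0.5 gives j=1 and g=0.5, so the result is y[1] + (y[2] - y[1]) * 0.5 = 2.5,
# matching numpy.percentile([1, 2, 3, 4], 50).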
def main():
#demFileName = "skunk.tif"
#demFilePath = "G:\\HarishLaptop_Backup\\TI102782W0E\\PythonScripts\\pyGeoNet1.0\\data\\"
print "Reading input file path :",Parameters.demDataFilePath
print "Reading input file :",Parameters.demFileName
rawDemArray = read_dem_from_geotiff(Parameters.demFileName,Parameters.demDataFilePath)
nanDemArray=rawDemArray
nanDemArray[nanDemArray < defaults.demNanFlag]= np.NAN
Parameters.minDemValue= np.min(nanDemArray[:])
Parameters.maxDemValue= np.max(nanDemArray[:])
# Area of analysis
Parameters.xDemSize=np.size(rawDemArray,0)
Parameters.yDemSize=np.size(rawDemArray,1)
# Calculate pixel length scale and assume square
Parameters.maxLowerLeftCoord = np.max([Parameters.xDemSize, Parameters.yDemSize])
print 'DTM size: ',Parameters.xDemSize, 'x' ,Parameters.yDemSize
#-----------------------------------------------------------------------------
# Compute slope magnitude for raw and filtered DEMs
print 'Computing slope of raw DTM'
slopeMagnitudeDemArray = np.gradient(nanDemArray,Parameters.demPixelScale)
print slopeMagnitudeDemArray
# Computation of the threshold lambda used in Perona-Malik nonlinear
# filtering. The value of lambda (=edgeThresholdValue) is given by the 90th
# quantile of the absolute value of the gradient.
    print 'Computing lambda = q-q-based nonlinear filtering threshold'
mult = Parameters.xDemSize * Parameters.yDemSize
print np.size(slopeMagnitudeDemArray,0)
edgeThresholdValue = quantile(np.reshape(slopeMagnitudeDemArray,mult),defaults.demSmoothingQuantile)
print edgeThresholdValue
if __name__ == '__main__':
t0 = clock()
main()
t1 = clock()
print "time taken to complete the script is::",t1-t0," seconds"
print "script complete"
|
gpl-3.0
|
dkarakats/edx-platform
|
lms/djangoapps/django_comment_client/forum/tests.py
|
14
|
53829
|
import json
import logging
import ddt
from django.core import cache
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from edxmako.tests import mako_middleware_process_request
from django_comment_client.forum import views
from django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
)
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import CohortedTestCase, ContentGroupTestCase
from django_comment_client.utils import strip_none
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from util.testing import UrlResetMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_MONGO_MODULESTORE
)
from xmodule.modulestore.tests.factories import check_mongo_calls, CourseFactory, ItemFactory
from courseware.courses import UserNotEnrolled
from nose.tools import assert_true # pylint: disable=E0611
from mock import patch, Mock, ANY, call
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
log = logging.getLogger(__name__)
# pylint: disable=missing-docstring
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsExceptionTestCase, self).setUp()
# create a course
self.course = CourseFactory.create(org='MITx', course='999',
display_name='Robot Super Course')
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = '[email protected]'
password = 'test'
# Create the student
self.student = UserFactory(username=uname, password=password, email=email)
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
# Log the student in
self.client = Client()
assert_true(self.client.login(username=uname, password=password))
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.active_threads')
def test_user_profile_exception(self, mock_threads, mock_from_django_user):
# Mock the code that makes the HTTP requests to the cs_comment_service app
# for the profiled user's active threads
mock_threads.return_value = [], 1, 1
# Mock the code that makes the HTTP request to the cs_comment_service app
# that gets the current user's info
mock_from_django_user.return_value = Mock()
url = reverse('django_comment_client.forum.views.user_profile',
kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'}) # There is no user 12345
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
@patch('student.models.cc.User.from_django_user')
@patch('student.models.cc.User.subscribed_threads')
def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):
# Mock the code that makes the HTTP requests to the cs_comment_service app
# for the profiled user's active threads
mock_threads.return_value = [], 1, 1
# Mock the code that makes the HTTP request to the cs_comment_service app
# that gets the current user's info
mock_from_django_user.return_value = Mock()
url = reverse('django_comment_client.forum.views.followed_threads',
kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'}) # There is no user 12345
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
def make_mock_thread_data(course, text, thread_id, num_children, group_id=None, group_name=None, commentable_id=None):
thread_data = {
"id": thread_id,
"type": "thread",
"title": text,
"body": text,
"commentable_id": (
commentable_id or course.discussion_topics.get('General', {}).get('id') or "dummy_commentable_id"
),
"resp_total": 42,
"resp_skip": 25,
"resp_limit": 5,
"group_id": group_id
}
if group_id is not None:
thread_data['group_name'] = group_name
if num_children is not None:
thread_data["children"] = [{
"id": "dummy_comment_id_{}".format(i),
"type": "comment",
"body": text,
} for i in range(num_children)]
return thread_data
def make_mock_request_impl(
course,
text,
thread_id="dummy_thread_id",
group_id=None,
commentable_id=None,
num_thread_responses=1
):
def mock_request_impl(*args, **kwargs):
url = args[1]
data = None
if url.endswith("threads") or url.endswith("user_profile"):
data = {
"collection": [
make_mock_thread_data(
course=course,
text=text,
thread_id=thread_id,
num_children=None,
group_id=group_id,
commentable_id=commentable_id
)
]
}
elif thread_id and url.endswith(thread_id):
data = make_mock_thread_data(
course=course, text=text, thread_id=thread_id, num_children=num_thread_responses, group_id=group_id
)
elif "/users/" in url:
data = {
"default_sort_key": "date",
"upvoted_ids": [],
"downvoted_ids": [],
"subscribed_thread_ids": [],
}
# comments service adds these attributes when course_id param is present
if kwargs.get('params', {}).get('course_id'):
data.update({
"threads_count": 1,
"comments_count": 2
})
if data:
return Mock(status_code=200, text=json.dumps(data), json=Mock(return_value=data))
return Mock(status_code=404)
return mock_request_impl
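# Illustrative usage note (added; mirrors the tests below, not part of the original
# file): the factory is handed to a patched requests.request as its side_effect, e.g.
#     mock_request.side_effect = make_mock_request_impl(course=course, text="dummy")
# so URLs ending in "threads", "user_profile" or the thread id, and URLs containing
# "/users/", get a canned 200 response while anything else gets a 404.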
class StringEndsWithMatcher(object):
def __init__(self, suffix):
self.suffix = suffix
def __eq__(self, other):
return other.endswith(self.suffix)
class PartialDictMatcher(object):
def __init__(self, expected_values):
self.expected_values = expected_values
def __eq__(self, other):
return all([
key in other and other[key] == value
for key, value in self.expected_values.iteritems()
])
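# Illustrative example (added; never called by the test suite): both matchers rely on
# __eq__, so they can be passed straight into mock assertions such as
# mock_request.assert_called_with(...).
def _matcher_example():
    assert StringEndsWithMatcher("/threads") == "http://cs.example.com/api/v1/threads"
    assert PartialDictMatcher({"page": 1}) == {"page": 1, "per_page": 20}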
@patch('requests.request')
class SingleThreadTestCase(ModuleStoreTestCase):
def setUp(self):
super(SingleThreadTestCase, self).setUp(create_user=False)
self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
self.student = UserFactory.create()
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
def test_ajax(self, mock_request):
text = "dummy content"
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"dummy_discussion_id",
"test_thread_id"
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
# strip_none is being used to perform the same transform that the
# django view performs prior to writing thread data to the response
self.assertEquals(
response_data["content"],
strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
)
mock_request.assert_called_with(
"get",
StringEndsWithMatcher(thread_id), # url
data=None,
params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
headers=ANY,
timeout=ANY
)
def test_skip_limit(self, mock_request):
text = "dummy content"
thread_id = "test_thread_id"
response_skip = "45"
response_limit = "15"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get(
"dummy_url",
{"resp_skip": response_skip, "resp_limit": response_limit},
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"dummy_discussion_id",
"test_thread_id"
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
# strip_none is being used to perform the same transform that the
# django view performs prior to writing thread data to the response
self.assertEquals(
response_data["content"],
strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
)
mock_request.assert_called_with(
"get",
StringEndsWithMatcher(thread_id), # url
data=None,
params=PartialDictMatcher({
"mark_as_read": True,
"user_id": 1,
"recursive": True,
"resp_skip": response_skip,
"resp_limit": response_limit,
}),
headers=ANY,
timeout=ANY
)
def test_post(self, mock_request):
request = RequestFactory().post("dummy_url")
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"dummy_discussion_id",
"dummy_thread_id"
)
self.assertEquals(response.status_code, 405)
def test_not_found(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
# Mock request to return 404 for thread request
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
self.assertRaises(
Http404,
views.single_thread,
request,
self.course.id.to_deprecated_string(),
"test_discussion_id",
"test_thread_id"
)
@ddt.ddt
@patch('requests.request')
class SingleThreadQueryCountTestCase(ModuleStoreTestCase):
"""
Ensures the number of modulestore queries and number of sql queries are
independent of the number of responses retrieved for a given discussion thread.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
@ddt.data(
# old mongo with cache: 15
(ModuleStoreEnum.Type.mongo, 1, 21, 15, 40, 27),
(ModuleStoreEnum.Type.mongo, 50, 315, 15, 628, 27),
# split mongo: 3 queries, regardless of thread response size.
(ModuleStoreEnum.Type.split, 1, 3, 3, 40, 27),
(ModuleStoreEnum.Type.split, 50, 3, 3, 628, 27),
)
@ddt.unpack
def test_number_of_mongo_queries(
self,
default_store,
num_thread_responses,
num_uncached_mongo_calls,
num_cached_mongo_calls,
num_uncached_sql_queries,
num_cached_sql_queries,
mock_request
):
with modulestore().default_store(default_store):
course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=course.id)
test_thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = student
def call_single_thread():
"""
Call single_thread and assert that it returns what we expect.
"""
response = views.single_thread(
request,
course.id.to_deprecated_string(),
"dummy_discussion_id",
test_thread_id
)
self.assertEquals(response.status_code, 200)
self.assertEquals(len(json.loads(response.content)["content"]["children"]), num_thread_responses)
# TODO: update this once django cache is disabled in tests
# Test with and without cache, clearing before and after use.
single_thread_local_cache = cache.get_cache(
backend='default',
LOCATION='single_thread_local_cache'
)
single_thread_dummy_cache = cache.get_cache(
backend='django.core.cache.backends.dummy.DummyCache',
LOCATION='single_thread_local_cache'
)
cached_calls = [
[single_thread_dummy_cache, num_uncached_mongo_calls, num_uncached_sql_queries],
[single_thread_local_cache, num_cached_mongo_calls, num_cached_sql_queries]
]
for single_thread_cache, expected_mongo_calls, expected_sql_queries in cached_calls:
single_thread_cache.clear()
with patch("django_comment_client.permissions.CACHE", single_thread_cache):
with self.assertNumQueries(expected_sql_queries):
with check_mongo_calls(expected_mongo_calls):
call_single_thread()
single_thread_cache.clear()
@patch('requests.request')
class SingleCohortedThreadTestCase(CohortedTestCase):
def _create_mock_cohorted_thread(self, mock_request):
self.mock_text = "dummy content"
self.mock_thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.mock_text, thread_id=self.mock_thread_id, group_id=self.student_cohort.id
)
def test_ajax(self, mock_request):
self._create_mock_cohorted_thread(mock_request)
request = RequestFactory().get(
"dummy_url",
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = self.student
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"cohorted_topic",
self.mock_thread_id
)
self.assertEquals(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEquals(
response_data["content"],
make_mock_thread_data(
course=self.course,
text=self.mock_text,
thread_id=self.mock_thread_id,
num_children=1,
group_id=self.student_cohort.id,
group_name=self.student_cohort.name
)
)
def test_html(self, mock_request):
self._create_mock_cohorted_thread(mock_request)
request = RequestFactory().get("dummy_url")
request.user = self.student
mako_middleware_process_request(request)
response = views.single_thread(
request,
self.course.id.to_deprecated_string(),
"cohorted_topic",
self.mock_thread_id
)
self.assertEquals(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
html = response.content
# Verify that the group name is correctly included in the HTML
self.assertRegexpMatches(html, r'"group_name": "student_cohort"')
@patch('lms.lib.comment_client.utils.requests.request')
class SingleThreadAccessTestCase(CohortedTestCase):
def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy context", thread_id=thread_id, group_id=thread_group_id
)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data,
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = user
return views.single_thread(
request,
self.course.id.to_deprecated_string(),
commentable_id,
thread_id
)
def test_student_non_cohorted(self, mock_request):
resp = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
self.assertEqual(resp.status_code, 200)
def test_student_same_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=self.student_cohort.id
)
self.assertEqual(resp.status_code, 200)
# this test ensures that a thread response from the cs with group_id: null
# behaves the same as a thread response without a group_id (see: TNL-444)
def test_student_global_thread_in_cohorted_topic(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=None
)
self.assertEqual(resp.status_code, 200)
def test_student_different_cohort(self, mock_request):
self.assertRaises(
Http404,
lambda: self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
thread_group_id=self.moderator_cohort.id
)
)
def test_moderator_non_cohorted(self, mock_request):
resp = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
self.assertEqual(resp.status_code, 200)
def test_moderator_same_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.moderator,
self.moderator_cohort.id,
thread_group_id=self.moderator_cohort.id
)
self.assertEqual(resp.status_code, 200)
def test_moderator_different_cohort(self, mock_request):
resp = self.call_view(
mock_request,
"cohorted_topic",
self.moderator,
self.moderator_cohort.id,
thread_group_id=self.student_cohort.id
)
self.assertEqual(resp.status_code, 200)
@patch('lms.lib.comment_client.utils.requests.request')
class SingleThreadGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy context", group_id=self.student_cohort.id
)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
request = RequestFactory().get(
"dummy_url",
data=request_data,
**headers
)
request.user = user
mako_middleware_process_request(request)
return views.single_thread(
request,
self.course.id.to_deprecated_string(),
commentable_id,
"dummy_thread_id"
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=False
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['content']
)
@patch('requests.request')
class SingleThreadContentGroupTestCase(ContentGroupTestCase):
def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
"""
Verify that a user has access to a thread within a given
discussion_id when should_have_access is True, otherwise
verify that the user does not have access to that thread.
"""
request = RequestFactory().get("dummy_url")
request.user = user
mako_middleware_process_request(request)
def call_single_thread():
return views.single_thread(
request,
unicode(self.course.id),
discussion_id,
thread_id
)
if should_have_access:
self.assertEqual(call_single_thread().status_code, 200)
else:
with self.assertRaises(Http404):
call_single_thread()
def test_staff_user(self, mock_request):
"""
Verify that the staff user can access threads in the alpha,
beta, and global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_module in [self.alpha_module, self.beta_module, self.global_module]:
self.assert_can_access(self.staff_user, discussion_module.discussion_id, thread_id, True)
def test_alpha_user(self, mock_request):
"""
Verify that the alpha user can access threads in the alpha and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_module in [self.alpha_module, self.global_module]:
self.assert_can_access(self.alpha_user, discussion_module.discussion_id, thread_id, True)
self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)
def test_beta_user(self, mock_request):
"""
Verify that the beta user can access threads in the beta and
global discussion modules.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
for discussion_module in [self.beta_module, self.global_module]:
self.assert_can_access(self.beta_user, discussion_module.discussion_id, thread_id, True)
self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)
def test_non_cohorted_user(self, mock_request):
"""
Verify that the non-cohorted user can access threads in just the
global discussion module.
"""
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)
self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)
self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)
self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)
@patch('lms.lib.comment_client.utils.requests.request')
class InlineDiscussionGroupIdTestCase(
CohortedTestCase,
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
):
cs_endpoint = "/threads"
def setUp(self):
super(InlineDiscussionGroupIdTestCase, self).setUp()
self.cohorted_commentable_id = 'cohorted_topic'
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
kwargs = {'commentable_id': self.cohorted_commentable_id}
if group_id:
# avoid causing a server error when the LMS chokes attempting
# to find a group name for the group_id, when we're testing with
# an invalid one.
try:
CourseUserGroup.objects.get(id=group_id)
kwargs['group_id'] = group_id
except CourseUserGroup.DoesNotExist:
pass
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data
)
request.user = user
return views.inline_discussion(
request,
self.course.id.to_deprecated_string(),
commentable_id
)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
self.cohorted_commentable_id,
self.student,
self.student_cohort.id
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request')
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
request = RequestFactory().get(
"dummy_url",
data=request_data,
**headers
)
request.user = user
mako_middleware_process_request(request)
return views.forum_form_discussion(
request,
self.course.id.to_deprecated_string()
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
@patch('lms.lib.comment_client.utils.requests.request')
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/active_threads"
def call_view_for_profiled_user(
self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
):
"""
Calls "user_profile" view method on behalf of "requesting_user" to get information about
the user "profiled_user".
"""
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
headers = {}
if is_ajax:
headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
request = RequestFactory().get(
"dummy_url",
data=request_data,
**headers
)
request.user = requesting_user
mako_middleware_process_request(request)
return views.user_profile(
request,
self.course.id.to_deprecated_string(),
profiled_user.id
)
def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
return self.call_view_for_profiled_user(
mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
)
def test_group_info_in_html_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=False
)
self._assert_html_response_contains_group_info(response)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id,
is_ajax=True
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
def _test_group_id_passed_to_user_profile(
self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
):
"""
Helper method for testing whether or not group_id was passed to the user_profile request.
"""
def get_params_from_user_info_call(for_specific_course):
"""
Returns the request parameters for the user info call with either course_id specified or not,
depending on value of 'for_specific_course'.
"""
# There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
# tested. The other 2 calls are for user info; one of those calls is for general information about the user,
# and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
# have discussion moderator privileges, it should also contain a group_id.
for r_call in mock_request.call_args_list:
if not r_call[0][1].endswith(self.cs_endpoint):
params = r_call[1]["params"]
has_course_id = "course_id" in params
if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
return params
            self.assertTrue(
                False,
                "Did not find appropriate user_profile call for 'for_specific_course'={}".format(for_specific_course)
            )
mock_request.reset_mock()
self.call_view_for_profiled_user(
mock_request,
requesting_user,
profiled_user,
group_id,
pass_group_id=pass_group_id,
is_ajax=False
)
# Should never have a group_id if course_id was not included in the request.
params_without_course_id = get_params_from_user_info_call(False)
self.assertNotIn("group_id", params_without_course_id)
params_with_course_id = get_params_from_user_info_call(True)
if expect_group_id_in_request:
self.assertIn("group_id", params_with_course_id)
self.assertEqual(group_id, params_with_course_id["group_id"])
else:
self.assertNotIn("group_id", params_with_course_id)
def test_group_id_passed_to_user_profile_student(self, mock_request):
"""
Test that the group id is always included when requesting user profile information for a particular
course if the requester does not have discussion moderation privileges.
"""
def verify_group_id_always_present(profiled_user, pass_group_id):
"""
Helper method to verify that group_id is always present for student in course
(non-privileged user).
"""
self._test_group_id_passed_to_user_profile(
mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
)
# In all these test cases, the requesting_user is the student (non-privileged user).
# The profile returned on behalf of the student is for the profiled_user.
verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)
def test_group_id_user_profile_moderator(self, mock_request):
"""
Test that the group id is only included when a privileged user requests user profile information for a
particular course and user if the group_id is explicitly passed in.
"""
def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
"""
Helper method to verify that group_id is present.
"""
self._test_group_id_passed_to_user_profile(
mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
)
def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
"""
Helper method to verify that group_id is not present.
"""
self._test_group_id_passed_to_user_profile(
mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
)
# In all these test cases, the requesting_user is the moderator (privileged user).
# If the group_id is explicitly passed, it will be present in the request.
verify_group_id_present(profiled_user=self.student, pass_group_id=True)
verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
verify_group_id_present(
profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
)
# If the group_id is not explicitly passed, it will not be present because the requesting_user
# has discussion moderator privileges.
verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('lms.lib.comment_client.utils.requests.request')
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
cs_endpoint = "/subscribed_threads"
def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
kwargs = {}
if group_id:
kwargs['group_id'] = group_id
mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)
request_data = {}
if pass_group_id:
request_data["group_id"] = group_id
request = RequestFactory().get(
"dummy_url",
data=request_data,
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
request.user = user
return views.followed_threads(
request,
self.course.id.to_deprecated_string(),
user.id
)
def test_group_info_in_ajax_response(self, mock_request):
response = self.call_view(
mock_request,
"cohorted_topic",
self.student,
self.student_cohort.id
)
self._assert_json_response_contains_group_info(
response, lambda d: d['discussion_data'][0]
)
class InlineDiscussionTestCase(ModuleStoreTestCase):
def setUp(self):
super(InlineDiscussionTestCase, self).setUp()
self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.discussion1 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion1",
display_name='Discussion1',
discussion_category="Chapter",
discussion_target="Discussion1"
)
@patch('lms.lib.comment_client.utils.requests.request')
def test_courseware_data(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
mock_request.side_effect = make_mock_request_impl(
course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
)
response = views.inline_discussion(
request, self.course.id.to_deprecated_string(), self.discussion1.discussion_id
)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
expected_courseware_url = '/courses/TestX/101/Test_Course/jump_to/i4x://TestX/101/discussion/Discussion1'
expected_courseware_title = 'Chapter / Discussion1'
self.assertEqual(response_data['discussion_data'][0]['courseware_url'], expected_courseware_url)
self.assertEqual(response_data["discussion_data"][0]["courseware_title"], expected_courseware_title)
@patch('requests.request')
class UserProfileTestCase(ModuleStoreTestCase):
TEST_THREAD_TEXT = 'userprofile-test-text'
TEST_THREAD_ID = 'userprofile-test-thread-id'
def setUp(self):
super(UserProfileTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
self.profiled_user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
def get_response(self, mock_request, params, **headers):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
)
request = RequestFactory().get("dummy_url", data=params, **headers)
request.user = self.student
mako_middleware_process_request(request)
response = views.user_profile(
request,
self.course.id.to_deprecated_string(),
self.profiled_user.id
)
mock_request.assert_any_call(
"get",
StringEndsWithMatcher('/users/{}/active_threads'.format(self.profiled_user.id)),
data=None,
params=PartialDictMatcher({
"course_id": self.course.id.to_deprecated_string(),
"page": params.get("page", 1),
"per_page": views.THREADS_PER_PAGE
}),
headers=ANY,
timeout=ANY
)
return response
def check_html(self, mock_request, **params):
response = self.get_response(mock_request, params)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
html = response.content
self.assertRegexpMatches(html, r'data-page="1"')
self.assertRegexpMatches(html, r'data-num-pages="1"')
self.assertRegexpMatches(html, r'<span>1</span> discussion started')
self.assertRegexpMatches(html, r'<span>2</span> comments')
self.assertRegexpMatches(html, r'"id": "{}"'.format(self.TEST_THREAD_ID))
self.assertRegexpMatches(html, r'"title": "{}"'.format(self.TEST_THREAD_TEXT))
self.assertRegexpMatches(html, r'"body": "{}"'.format(self.TEST_THREAD_TEXT))
self.assertRegexpMatches(html, r'"username": "{}"'.format(self.student.username))
def check_ajax(self, mock_request, **params):
response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
response_data = json.loads(response.content)
self.assertEqual(
sorted(response_data.keys()),
["annotated_content_info", "discussion_data", "num_pages", "page"]
)
self.assertEqual(len(response_data['discussion_data']), 1)
self.assertEqual(response_data["page"], 1)
self.assertEqual(response_data["num_pages"], 1)
self.assertEqual(response_data['discussion_data'][0]['id'], self.TEST_THREAD_ID)
self.assertEqual(response_data['discussion_data'][0]['title'], self.TEST_THREAD_TEXT)
self.assertEqual(response_data['discussion_data'][0]['body'], self.TEST_THREAD_TEXT)
def test_html(self, mock_request):
self.check_html(mock_request)
def test_html_p2(self, mock_request):
self.check_html(mock_request, page="2")
def test_ajax(self, mock_request):
self.check_ajax(mock_request)
def test_ajax_p2(self, mock_request):
self.check_ajax(mock_request, page="2")
def test_404_profiled_user(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
with self.assertRaises(Http404):
views.user_profile(
request,
self.course.id.to_deprecated_string(),
-999
)
def test_404_course(self, mock_request):
request = RequestFactory().get("dummy_url")
request.user = self.student
with self.assertRaises(Http404):
views.user_profile(
request,
"non/existent/course",
self.profiled_user.id
)
def test_post(self, mock_request):
mock_request.side_effect = make_mock_request_impl(
course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
)
request = RequestFactory().post("dummy_url")
request.user = self.student
response = views.user_profile(
request,
self.course.id.to_deprecated_string(),
self.profiled_user.id
)
self.assertEqual(response.status_code, 405)
@patch('requests.request')
class CommentsServiceRequestHeadersTestCase(UrlResetMixin, ModuleStoreTestCase):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CommentsServiceRequestHeadersTestCase, self).setUp()
username = "foo"
password = "bar"
# Invoke UrlResetMixin
super(CommentsServiceRequestHeadersTestCase, self).setUp(create_user=False)
self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
self.student = UserFactory.create(username=username, password=password)
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
self.assertTrue(
self.client.login(username=username, password=password)
)
def assert_all_calls_have_header(self, mock_request, key, value):
expected = call(
ANY, # method
ANY, # url
data=ANY,
params=ANY,
headers=PartialDictMatcher({key: value}),
timeout=ANY
)
for actual in mock_request.call_args_list:
self.assertEqual(expected, actual)
def test_accept_language(self, mock_request):
lang = "eo"
text = "dummy content"
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
self.client.get(
reverse(
"django_comment_client.forum.views.single_thread",
kwargs={
"course_id": self.course.id.to_deprecated_string(),
"discussion_id": "dummy_discussion_id",
"thread_id": thread_id,
}
),
HTTP_ACCEPT_LANGUAGE=lang,
)
self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)
@override_settings(COMMENTS_SERVICE_KEY="test_api_key")
def test_api_key(self, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")
self.client.get(
reverse(
"django_comment_client.forum.views.forum_form_discussion",
kwargs={"course_id": self.course.id.to_deprecated_string()}
),
)
self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin):
def setUp(self):
super(InlineDiscussionUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
response = views.inline_discussion(
request, self.course.id.to_deprecated_string(), self.course.discussion_topics['General']['id']
)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class ForumFormDiscussionUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin):
def setUp(self):
super(ForumFormDiscussionUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class ForumDiscussionSearchUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin):
def setUp(self):
super(ForumDiscussionSearchUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
data = {
"ajax": 1,
"text": text,
}
request = RequestFactory().get("dummy_url", data)
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class SingleThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin):
def setUp(self):
super(SingleThreadUnicodeTestCase, self).setUp()
self.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
thread_id = "test_thread_id"
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.single_thread(request, self.course.id.to_deprecated_string(), "dummy_discussion_id", thread_id)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["content"]["title"], text)
self.assertEqual(response_data["content"]["body"], text)
class UserProfileUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin):
def setUp(self):
super(UserProfileUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.user_profile(request, self.course.id.to_deprecated_string(), str(self.student.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class FollowedThreadsUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin):
def setUp(self):
super(FollowedThreadsUnicodeTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
request = RequestFactory().get("dummy_url")
request.user = self.student
request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest" # so request.is_ajax() == True
response = views.followed_threads(request, self.course.id.to_deprecated_string(), str(self.student.id))
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response_data["discussion_data"][0]["title"], text)
self.assertEqual(response_data["discussion_data"][0]["body"], text)
class EnrollmentTestCase(ModuleStoreTestCase):
"""
Tests for the behavior of views depending on if the student is enrolled
in the course
"""
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(EnrollmentTestCase, self).setUp()
self.course = CourseFactory.create()
self.student = UserFactory.create()
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
@patch('lms.lib.comment_client.utils.requests.request')
def test_unenrolled(self, mock_request):
mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
request = RequestFactory().get('dummy_url')
request.user = self.student
with self.assertRaises(UserNotEnrolled):
views.forum_form_discussion(request, course_id=self.course.id.to_deprecated_string())
|
agpl-3.0
|
krast/suse_xen
|
tools/python/xen/xend/server/netif2.py
|
43
|
5574
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <[email protected]>
# Copyright (C) 2005 XenSource Ltd
# Copyright (C) 2008 Citrix Systems Inc.
#============================================================================
#
# Based closely on netif.py.
#
"""Support for virtual network interfaces, version 2.
"""
import os
import random
import re
import time
from xen.xend import XendOptions
from xen.xend.server.DevController import DevController
from xen.xend.XendError import VmError
from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
from xen.xend.xenstore.xstransact import xstransact
import xen.util.xsm.xsm as security
from xen.xend.XendLogging import log
xoptions = XendOptions.instance()
def randomMAC():
"""Generate a random MAC address.
Uses OUI (Organizationally Unique Identifier) 00-16-3E, allocated to
Xensource, Inc. The OUI list is available at
http://standards.ieee.org/regauth/oui/oui.txt.
The remaining 3 fields are random, with the first bit of the first
    random field set to 0.
@return: MAC address string
"""
mac = [ 0x00, 0x16, 0x3e,
random.randint(0x00, 0x7f),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff) ]
return ':'.join(map(lambda x: "%02x" % x, mac))
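# Illustrative example (added; not part of the original module): randomMAC() always
# starts with the Xensource OUI and randomises only the last three octets,
# e.g. "00:16:3e:5a:12:f0".
def _random_mac_example():
    """Hypothetical helper; never called by xend."""
    mac = randomMAC()
    assert mac.startswith("00:16:3e:")
    assert len(mac.split(":")) == 6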
class NetifController2(DevController):
def __init__(self, vm):
DevController.__init__(self, vm)
def getDeviceDetails(self, config):
"""@see DevController.getDeviceDetails"""
devid = self.allocateDeviceID()
bridge = config.get('bridge')
back_mac = config.get('back_mac')
if not back_mac:
if bridge:
back_mac = "fe:ff:ff:ff:ff:ff"
else:
back_mac = randomMAC()
front_mac = config.get('front_mac') or randomMAC()
front_trust = config.get("trusted") or "0"
back_trust = config.get("back_trusted") or "1"
max_bypasses = config.get("max_bypasses") or "5"
pdev = config.get('pdev')
front_filter = config.get("front_filter_mac")
        if front_filter is None:
if back_trust == "0":
front_filter = "1"
else:
front_filter = "0"
back_filter = config.get("filter_mac")
        if back_filter is None:
if front_trust == "0":
back_filter = "1"
else:
back_filter = "0"
back = { 'mac': back_mac, 'remote-mac': front_mac,
'handle': "%i" % devid, 'local-trusted': back_trust,
'remote-trusted': front_trust, 'filter-mac': back_filter,
'max-bypasses': max_bypasses }
front = { 'mac': front_mac, 'remote-mac': back_mac,
'local-trusted': front_trust, 'remote-trusted': back_trust,
'filter-mac': front_filter }
if bridge:
back['bridge'] = bridge
if pdev:
back['pdev'] = pdev
return (devid, back, front)
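    # Illustrative note (added): for config = {'bridge': 'xenbr0'} and otherwise
    # default settings this returns roughly
    #     (devid,
    #      {'mac': 'fe:ff:ff:ff:ff:ff', 'remote-mac': <random front MAC>,
    #       'handle': '<devid>', 'local-trusted': '1', 'remote-trusted': '0',
    #       'filter-mac': '1', 'max-bypasses': '5', 'bridge': 'xenbr0'},
    #      {'mac': <random front MAC>, 'remote-mac': 'fe:ff:ff:ff:ff:ff',
    #       'local-trusted': '0', 'remote-trusted': '1', 'filter-mac': '0'})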
def getDeviceConfiguration(self, devid, transaction = None):
"""@see DevController.configuration"""
if transaction is None:
read_fn = xstransact.Read
else:
read_fn = transaction.read
def front_read(x):
return read_fn(frontpath + x)
def back_read(x):
return read_fn(backpath + x)
result = DevController.getDeviceConfiguration(self, devid, transaction)
dev = self.convertToDeviceNumber(devid)
frontpath = self.frontendPath(dev) + "/"
backpath = front_read("backend") + "/"
front_mac = front_read("mac")
back_mac = back_read("mac")
front_trusted = back_read("remote-trusted")
back_trusted = back_read("local-trusted")
max_bypasses = back_read("max-bypasses")
bridge = back_read("bridge")
pdev = back_read("pdev")
if front_mac:
result["front_mac"] = front_mac
if back_mac:
result["back_mac"] = back_mac
if front_trusted:
result["front_trusted"] = front_trusted
if back_trusted:
result["back_trusted"] = back_trusted
if bridge:
result["bridge"] = bridge
if pdev:
result["pdev"] = pdev
if max_bypasses:
result["max-bypasses"] = max_bypasses
return result
def destroyDevice(self, devid, force):
dev = self.convertToDeviceNumber(devid)
self.writeBackend(dev, "online", "0")
if force:
self.writeBackend(dev, "shutdown-request", "force")
else:
self.writeBackend(dev, "shutdown-request", "normal")
self.vm._removeVm("device/%s/%d" % (self.deviceClass, dev))
|
gpl-2.0
|
allanlei/django-landlord
|
landlord/routers.py
|
1
|
3170
|
# from landlord import landlord
# from landlord.conf import settings
# import logging
# logger = logging.getLogger(__name__)
# class LandlordRouter(object):
# @property
# def ignored_models(self):
# if not getattr(self, '__ignored_models', None):
# from django.db.models.loading import get_models, get_model, get_app
# from landlord.conf import settings
# models = set()
# ignored_apps = tuple(settings.LANDLORD_ROUTER_IGNORED_APPS)
# for app in ignored_apps:
# models.update(set(get_models(get_app(app))))
# ignored_models = settings.LANDLORD_ROUTER_IGNORED_MODELS
# for model in ignored_models:
# models.add(get_model(*model.split('.')))
# self.__ignored_models = tuple(models)
# logger.info('Ignored models: %s', self.__ignored_models)
# return self.__ignored_models
# def db_for_read(self, model, **hints):
# if model in self.ignored_models:
# return None
# tenant = hints.get(settings.LANDLORD_DEFAULT_TENANT_KEY, landlord.get_current_tenant())
# if tenant:
# return unicode(tenant)
# def db_for_write(self, model, **hints):
# if model in self.ignored_models:
# return None
# tenant = hints.get(settings.LANDLORD_DEFAULT_TENANT_KEY, landlord.get_current_tenant())
# if tenant:
# return str(tenant)
# def allow_syncdb(self, db, model):
# if db in ['default']:
# return True
# if model in self.ignored_models:
# logger.info('Ignoring syncdb on %s for database %s', model, db)
# return False
# def allow_relation(self, obj1, obj2, **hints):
# settings.LANDLORD_ROUTER_RELATION_IGNORE
# return landlord.current_tenant
# print 'allow_relation: ', obj1, obj2, hints
# import logging
# logger = logging.getLogger(__name__)
def enable_router_hints():
import new
from django.db import DEFAULT_DB_ALIAS, router
from landlord.conf import settings
def _router_func(action):
def _route_db(self, model, **hints):
from landlord import landlord
hints.update({
settings.LANDLORD_DEFAULT_NAMESPACE_KEY: landlord.get_current_namespace(),
})
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
try:
return hints['instance']._state.db or DEFAULT_DB_ALIAS
except KeyError:
return DEFAULT_DB_ALIAS
return _route_db
router.db_for_read = new.instancemethod(_router_func('db_for_read'), router, None)
router.db_for_write = new.instancemethod(_router_func('db_for_write'), router, None)
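# Illustrative usage note (added; not part of the original module): calling
# enable_router_hints() once at startup -- e.g. from the project's settings module or
# an app initialisation hook -- monkey-patches django.db.router so that every
# db_for_read/db_for_write lookup receives the current landlord namespace in its
# hints before the configured routers are consulted.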
|
bsd-3-clause
|
JackDandy/SickGear
|
lib/hachoir_py2/parser/archive/ace.py
|
2
|
10181
|
"""
ACE parser
From wotsit.org and the SDK header (bitflags)
Partial study of a new block type (5) I've called "new_recovery", as its
syntax is very close to the former one (of type 2).
Status: can only fully read file and header blocks.
Author: Christophe Gisquet <[email protected]>
Creation date: 19 January 2006
"""
from hachoir_py2.parser import Parser
from hachoir_py2.field import (StaticFieldSet, FieldSet,
Bit, Bits, NullBits, RawBytes, Enum,
UInt8, UInt16, UInt32,
PascalString8, PascalString16, String,
TimeDateMSDOS32)
from hachoir_py2.core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_py2.core.endian import LITTLE_ENDIAN
from hachoir_py2.parser.common.msdos import MSDOSFileAttr32
MAGIC = "**ACE**"
OS_MSDOS = 0
OS_WIN32 = 2
HOST_OS = {
0: "MS-DOS",
1: "OS/2",
2: "Win32",
3: "Unix",
4: "MAC-OS",
5: "Win NT",
6: "Primos",
7: "APPLE GS",
8: "ATARI",
9: "VAX VMS",
10: "AMIGA",
11: "NEXT",
}
COMPRESSION_TYPE = {
0: "Store",
1: "Lempel-Ziv 77",
2: "ACE v2.0",
}
COMPRESSION_MODE = {
0: "fastest",
1: "fast",
2: "normal",
3: "good",
4: "best",
}
# TODO: Computing the CRC16 would also prove useful
# def markerValidate(self):
# return not self["extend"].value and self["signature"].value == MAGIC and \
# self["host_os"].value<12
class MarkerFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(Bit, "has_comment", "Whether the archive has a comment"),
(NullBits, "unused", 7, "Reserved bits"),
(Bit, "sfx", "SFX"),
(Bit, "limited_dict", "Junior SFX with 256K dictionary"),
(Bit, "multi_volume", "Part of a set of ACE archives"),
(Bit, "has_av_string", "This header holds an AV-string"),
(Bit, "recovery_record", "Recovery record preset"),
(Bit, "locked", "Archive is locked"),
(Bit, "solid", "Archive uses solid compression")
)
def markerFlags(self):
yield MarkerFlags(self, "flags", "Marker flags")
def markerHeader(self):
yield String(self, "signature", 7, "Signature")
yield UInt8(self, "ver_extract", "Version needed to extract archive")
yield UInt8(self, "ver_created", "Version used to create archive")
yield Enum(UInt8(self, "host_os", "OS where the files were compressed"), HOST_OS)
yield UInt8(self, "vol_num", "Volume number")
yield TimeDateMSDOS32(self, "time", "Date and time (MS DOS format)")
yield Bits(self, "reserved", 64, "Reserved size for future extensions")
flags = self["flags"]
if flags["has_av_string"].value:
yield PascalString8(self, "av_string", "AV String")
if flags["has_comment"].value:
size = filesizeHandler(UInt16(self, "comment_size", "Comment size"))
yield size
if size.value > 0:
yield RawBytes(self, "compressed_comment", size.value, \
"Compressed comment")
class FileFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(Bit, "has_comment", "Presence of file comment"),
(Bits, "unused", 10, "Unused bit flags"),
(Bit, "encrypted", "File encrypted with password"),
(Bit, "previous", "File continued from previous volume"),
(Bit, "next", "File continues on the next volume"),
(Bit, "solid", "File compressed using previously archived files")
)
def fileFlags(self):
yield FileFlags(self, "flags", "File flags")
def fileHeader(self):
yield filesizeHandler(UInt32(self, "compressed_size", "Size of the compressed file"))
yield filesizeHandler(UInt32(self, "uncompressed_size", "Uncompressed file size"))
yield TimeDateMSDOS32(self, "ftime", "Date and time (MS DOS format)")
if self["/header/host_os"].value in (OS_MSDOS, OS_WIN32):
yield MSDOSFileAttr32(self, "file_attr", "File attributes")
else:
yield textHandler(UInt32(self, "file_attr", "File attributes"), hexadecimal)
yield textHandler(UInt32(self, "file_crc32", "CRC32 checksum over the compressed file)"), hexadecimal)
yield Enum(UInt8(self, "compression_type", "Type of compression"), COMPRESSION_TYPE)
yield Enum(UInt8(self, "compression_mode", "Quality of compression"), COMPRESSION_MODE)
yield textHandler(UInt16(self, "parameters", "Compression parameters"), hexadecimal)
yield textHandler(UInt16(self, "reserved", "Reserved data"), hexadecimal)
# Filename
yield PascalString16(self, "filename", "Filename")
# Comment
if self["flags/has_comment"].value:
yield filesizeHandler(UInt16(self, "comment_size", "Size of the compressed comment"))
if self["comment_size"].value > 0:
yield RawBytes(self, "comment_data", self["comment_size"].value, "Comment data")
def fileBody(self):
size = self["compressed_size"].value
if size > 0:
yield RawBytes(self, "compressed_data", size, "Compressed data")
def fileDesc(self):
return "File entry: %s (%s)" % (self["filename"].value, self["compressed_size"].display)
def recoveryHeader(self):
yield filesizeHandler(UInt32(self, "rec_blk_size", "Size of recovery data"))
    self.body_size = self["rec_blk_size"].value
yield String(self, "signature", 7, "Signature, normally '**ACE**'")
yield textHandler(UInt32(self, "relative_start",
"Relative start (to this block) of the data this block is mode of"),
hexadecimal)
yield UInt32(self, "num_blocks", "Number of blocks the data is split into")
yield UInt32(self, "size_blocks", "Size of these blocks")
yield UInt16(self, "crc16_blocks", "CRC16 over recovery data")
    # num_blocks blocks of size size_blocks follow
# The ultimate data is the xor data of all those blocks
size = self["size_blocks"].value
for index in xrange(self["num_blocks"].value):
yield RawBytes(self, "data[]", size, "Recovery block %i" % index)
yield RawBytes(self, "xor_data", size, "The XOR value of the above data blocks")
def recoveryDesc(self):
return "Recovery block, size=%u" % self["body_size"].display
def newRecoveryHeader(self):
"""
This header is described nowhere
"""
if self["flags/extend"].value:
yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
self.body_size = self["body_size"].value
yield textHandler(UInt32(self, "unknown[]", "Unknown field, probably 0"),
hexadecimal)
yield String(self, "signature", 7, "Signature, normally '**ACE**'")
yield textHandler(UInt32(self, "relative_start",
"Offset (=crc16's) of this block in the file"), hexadecimal)
yield textHandler(UInt32(self, "unknown[]",
"Unknown field, probably 0"), hexadecimal)
class BaseFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(NullBits, "unused", 15, "Unused bit flags")
)
def parseFlags(self):
yield BaseFlags(self, "flags", "Unknown flags")
def parseHeader(self):
if self["flags/extend"].value:
yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
self.body_size = self["body_size"].value
def parseBody(self):
if self.body_size > 0:
yield RawBytes(self, "body_data", self.body_size, "Body data, unhandled")
class Block(FieldSet):
TAG_INFO = {
0: ("header", "Archiver header", markerFlags, markerHeader, None),
1: ("file[]", fileDesc, fileFlags, fileHeader, fileBody),
2: ("recovery[]", recoveryDesc, recoveryHeader, None, None),
5: ("new_recovery[]", None, None, newRecoveryHeader, None)
}
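    # Comment added for clarity: each TAG_INFO entry is (field name, description or
    # description callback, flags generator, header generator, body generator); a None
    # entry falls back to the generic parseFlags/parseHeader/parseBody defined above.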
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
self.body_size = 0
self.desc_func = None
type = self["block_type"].value
if type in self.TAG_INFO:
self._name, desc, self.parseFlags, self.parseHeader, self.parseBody = self.TAG_INFO[type]
if desc:
if isinstance(desc, str):
self._description = desc
else:
self.desc_func = desc
else:
self.warning("Processing as unknown block block of type %u" % type)
if not self.parseFlags:
self.parseFlags = parseFlags
if not self.parseHeader:
self.parseHeader = parseHeader
if not self.parseBody:
self.parseBody = parseBody
def createFields(self):
yield textHandler(UInt16(self, "crc16", "Archive CRC16 (from byte 4 on)"), hexadecimal)
yield filesizeHandler(UInt16(self, "head_size", "Block size (from byte 4 on)"))
yield UInt8(self, "block_type", "Block type")
# Flags
for flag in self.parseFlags(self):
yield flag
# Rest of the header
for field in self.parseHeader(self):
yield field
size = self["head_size"].value - (self.current_size // 8) + (2 + 2)
if size > 0:
yield RawBytes(self, "extra_data", size, "Extra header data, unhandled")
# Body in itself
for field in self.parseBody(self):
yield field
def createDescription(self):
if self.desc_func:
return self.desc_func(self)
else:
return "Block: %s" % self["type"].display
class AceFile(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "ace",
"category": "archive",
"file_ext": ("ace",),
"mime": (u"application/x-ace-compressed",),
"min_size": 50 * 8,
"description": "ACE archive"
}
def validate(self):
if self.stream.readBytes(7 * 8, len(MAGIC)) != MAGIC:
return "Invalid magic"
return True
def createFields(self):
while not self.eof:
yield Block(self, "block[]")
|
gpl-3.0
|