repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jjmleiro/hue
|
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/geoapp/test_regress.py
|
47
|
4031
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import no_mysql, no_spatialite
from django.contrib.gis.shortcuts import render_to_kmz
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, no_oracle
from django.db.models import Count, Min
from django.test import TestCase
from django.utils.unittest import skipUnless
if HAS_GEOS:
from .models import City, PennsylvaniaCity, State, Truth
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class GeoRegressionTests(TestCase):
def test_update(self):
"Testing GeoQuerySet.update(). See #10411."
pnt = City.objects.get(name='Pueblo').point
bak = pnt.clone()
pnt.y += 0.005
pnt.x += 0.005
City.objects.filter(name='Pueblo').update(point=pnt)
self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
City.objects.filter(name='Pueblo').update(point=bak)
self.assertEqual(bak, City.objects.get(name='Pueblo').point)
def test_kmz(self):
"Testing `render_to_kmz` with non-ASCII data. See #11624."
name = "Åland Islands"
places = [{'name' : name,
'description' : name,
'kml' : '<Point><coordinates>5.0,23.0</coordinates></Point>'
}]
kmz = render_to_kmz('gis/kml/placemarks.kml', {'places' : places})
@no_spatialite
@no_mysql
def test_extent(self):
"Testing `extent` on a table with a single point. See #11827."
pnt = City.objects.get(name='Pueblo').point
ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
extent = City.objects.filter(name='Pueblo').extent()
for ref_val, val in zip(ref_ext, extent):
self.assertAlmostEqual(ref_val, val, 4)
def test_unicode_date(self):
"Testing dates are converted properly, even on SpatiaLite. See #16408."
founded = datetime(1857, 5, 23)
mansfield = PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)',
founded=founded)
self.assertEqual(founded, PennsylvaniaCity.objects.datetimes('founded', 'day')[0])
self.assertEqual(founded, PennsylvaniaCity.objects.aggregate(Min('founded'))['founded__min'])
def test_empty_count(self):
"Testing that PostGISAdapter.__eq__ does check empty strings. See #13670."
# contrived example, but need a geo lookup paired with an id__in lookup
pueblo = City.objects.get(name='Pueblo')
state = State.objects.filter(poly__contains=pueblo.point)
cities_within_state = City.objects.filter(id__in=state)
# .count() should not throw TypeError in __eq__
self.assertEqual(cities_within_state.count(), 1)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test_defer_or_only_with_annotate(self):
"Regression for #16409. Make sure defer() and only() work with annotate()"
self.assertIsInstance(list(City.objects.annotate(Count('point')).defer('name')), list)
self.assertIsInstance(list(City.objects.annotate(Count('point')).only('name')), list)
def test_boolean_conversion(self):
"Testing Boolean value conversion with the spatial backend, see #15169."
t1 = Truth.objects.create(val=True)
t2 = Truth.objects.create(val=False)
val1 = Truth.objects.get(pk=t1.pk).val
val2 = Truth.objects.get(pk=t2.pk).val
# verify types -- shouldn't be 0/1
self.assertIsInstance(val1, bool)
self.assertIsInstance(val2, bool)
# verify values
self.assertEqual(val1, True)
self.assertEqual(val2, False)
|
apache-2.0
|
Cuuuurzel/KiPyCalc
|
sympy/vector/tests/test_dyadic.py
|
94
|
4076
|
from sympy import sin, cos, symbols, pi, ImmutableMatrix as Matrix, \
simplify
from sympy.vector import (CoordSysCartesian, Vector, Dyadic,
DyadicAdd, DyadicMul, DyadicZero,
BaseDyadic, express)
A = CoordSysCartesian('A')
def test_dyadic():
a, b = symbols('a, b')
assert Dyadic.zero != 0
assert isinstance(Dyadic.zero, DyadicZero)
assert BaseDyadic(A.i, A.j) != BaseDyadic(A.j, A.i)
assert (BaseDyadic(Vector.zero, A.i) ==
BaseDyadic(A.i, Vector.zero) == Dyadic.zero)
d1 = A.i | A.i
d2 = A.j | A.j
d3 = A.i | A.j
assert isinstance(d1, BaseDyadic)
d_mul = a*d1
assert isinstance(d_mul, DyadicMul)
assert d_mul.base_dyadic == d1
assert d_mul.measure_number == a
assert isinstance(a*d1 + b*d3, DyadicAdd)
assert d1 == A.i.outer(A.i)
assert d3 == A.i.outer(A.j)
v1 = a*A.i - A.k
v2 = A.i + b*A.j
assert v1 | v2 == v1.outer(v2) == a * (A.i|A.i) + (a*b) * (A.i|A.j) +\
- (A.k|A.i) - b * (A.k|A.j)
assert d1 * 0 == Dyadic.zero
assert d1 != Dyadic.zero
assert d1 * 2 == 2 * (A.i | A.i)
assert d1 / 2. == 0.5 * d1
assert d1.dot(0 * d1) == Vector.zero
assert d1 & d2 == Dyadic.zero
assert d1.dot(A.i) == A.i == d1 & A.i
assert d1.cross(Vector.zero) == Dyadic.zero
assert d1.cross(A.i) == Dyadic.zero
assert d1 ^ A.j == d1.cross(A.j)
assert d1.cross(A.k) == - A.i | A.j
assert d2.cross(A.i) == - A.j | A.k == d2 ^ A.i
assert A.i ^ d1 == Dyadic.zero
assert A.j.cross(d1) == - A.k | A.i == A.j ^ d1
assert Vector.zero.cross(d1) == Dyadic.zero
assert A.k ^ d1 == A.j | A.i
assert A.i.dot(d1) == A.i & d1 == A.i
assert A.j.dot(d1) == Vector.zero
assert Vector.zero.dot(d1) == Vector.zero
assert A.j & d2 == A.j
assert d1.dot(d3) == d1 & d3 == A.i | A.j == d3
assert d3 & d1 == Dyadic.zero
q = symbols('q')
B = A.orient_new_axis('B', q, A.k)
assert express(d1, B) == express(d1, B, B)
assert express(d1, B) == ((cos(q)**2) * (B.i | B.i) + (-sin(q) * cos(q)) *
(B.i | B.j) + (-sin(q) * cos(q)) * (B.j | B.i) + (sin(q)**2) *
(B.j | B.j))
assert express(d1, B, A) == (cos(q)) * (B.i | A.i) + (-sin(q)) * (B.j | A.i)
assert express(d1, A, B) == (cos(q)) * (A.i | B.i) + (-sin(q)) * (A.i | B.j)
assert d1.to_matrix(A) == Matrix([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
assert d1.to_matrix(A, B) == Matrix([[cos(q), -sin(q), 0],
[0, 0, 0],
[0, 0, 0]])
assert d3.to_matrix(A) == Matrix([[0, 1, 0], [0, 0, 0], [0, 0, 0]])
a, b, c, d, e, f = symbols('a, b, c, d, e, f')
v1 = a * A.i + b * A.j + c * A.k
v2 = d * A.i + e * A.j + f * A.k
d4 = v1.outer(v2)
assert d4.to_matrix(A) == Matrix([[a * d, a * e, a * f],
[b * d, b * e, b * f],
[c * d, c * e, c * f]])
d5 = v1.outer(v1)
C = A.orient_new_axis('C', q, A.i)
for expected, actual in zip(C.rotation_matrix(A) * d5.to_matrix(A) * \
C.rotation_matrix(A).T, d5.to_matrix(C)):
assert (expected - actual).simplify() == 0
def test_dyadic_simplify():
x, y, z, k, n, m, w, f, s, A = symbols('x, y, z, k, n, m, w, f, s, A')
N = CoordSysCartesian('N')
dy = N.i | N.i
test1 = (1 / x + 1 / y) * dy
assert (N.i & test1 & N.i) != (x + y) / (x * y)
test1 = test1.simplify()
assert test1.simplify() == simplify(test1)
assert (N.i & test1 & N.i) == (x + y) / (x * y)
test2 = (A**2 * s**4 / (4 * pi * k * m**3)) * dy
test2 = test2.simplify()
assert (N.i & test2 & N.i) == (A**2 * s**4 / (4 * pi * k * m**3))
test3 = ((4 + 4 * x - 2 * (2 + 2 * x)) / (2 + 2 * x)) * dy
test3 = test3.simplify()
assert (N.i & test3 & N.i) == 0
test4 = ((-4 * x * y**2 - 2 * y**3 - 2 * x**2 * y) / (x + y)**2) * dy
test4 = test4.simplify()
assert (N.i & test4 & N.i) == -2 * y
|
mit
|
sinhrks/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits_active_learning.py
|
294
|
3417
|
"""
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([], dtype=int)
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
|
bsd-3-clause
|
bakerlover/lab5
|
main/model.py
|
2
|
2082
|
# -*- coding: utf-8 -*-
import os
from google.appengine.api import app_identity
from google.appengine.ext import ndb
import modelx
import util
# The timestamp of the currently deployed version
TIMESTAMP = long(os.environ.get('CURRENT_VERSION_ID').split('.')[1]) >> 28
class Base(ndb.Model, modelx.BaseX):
created = ndb.DateTimeProperty(auto_now_add=True)
modified = ndb.DateTimeProperty(auto_now=True)
version = ndb.IntegerProperty(default=TIMESTAMP)
_PROPERTIES = {
'key',
'id',
'version',
'created',
'modified',
}
class Config(Base, modelx.ConfigX):
analytics_id = ndb.StringProperty(default='')
announcement_html = ndb.TextProperty(default='')
announcement_type = ndb.StringProperty(default='info', choices=[
'info', 'warning', 'success', 'danger',
])
brand_name = ndb.StringProperty(default=app_identity.get_application_id())
facebook_app_id = ndb.StringProperty(default='')
facebook_app_secret = ndb.StringProperty(default='')
feedback_email = ndb.StringProperty(default='')
flask_secret_key = ndb.StringProperty(default=util.uuid())
twitter_consumer_key = ndb.StringProperty(default='')
twitter_consumer_secret = ndb.StringProperty(default='')
_PROPERTIES = Base._PROPERTIES.union({
'analytics_id',
'announcement_html',
'announcement_type',
'brand_name',
'facebook_app_id',
'facebook_app_secret',
'feedback_email',
'flask_secret_key',
'twitter_consumer_key',
'twitter_consumer_secret',
})
class User(Base, modelx.UserX):
name = ndb.StringProperty(required=True)
username = ndb.StringProperty(required=True)
email = ndb.StringProperty(default='')
auth_ids = ndb.StringProperty(repeated=True)
active = ndb.BooleanProperty(default=True)
admin = ndb.BooleanProperty(default=False)
permissions = ndb.StringProperty(repeated=True)
_PROPERTIES = Base._PROPERTIES.union({
'active',
'admin',
'auth_ids',
'avatar_url',
'email',
'name',
'username',
'permissions',
})
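# --- Illustrative sketch, not part of the original module. A minimal example of
# how these ndb models are typically used inside an App Engine request handler;
# the entity values and key name below are hypothetical.
#
#   user = User(name='Ada Lovelace', username='ada')
#   user_key = user.put()                        # created/modified/version filled in by Base
#   admins = User.query(User.admin == True).fetch(20)
#   config = Config.get_or_insert('master')      # singleton-style config entity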
|
mit
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/pip/_vendor/packaging/_structures.py
|
906
|
1809
|
# Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
class Infinity(object):
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity(object):
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
NegativeInfinity = NegativeInfinity()
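# --- Illustrative sketch, not part of the vendored module. The two sentinels
# compare below/above every other object, which lets the rest of `packaging`
# use them as extreme values when building version-comparison keys.
if __name__ == "__main__":
    assert Infinity > 10 ** 9 and not Infinity < 0
    assert NegativeInfinity < -10 ** 9 and not NegativeInfinity > 0
    assert sorted([Infinity, 3, NegativeInfinity]) == [NegativeInfinity, 3, Infinity]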
|
agpl-3.0
|
powerjg/gem5-ci-test
|
configs/example/ruby_gpu_random_test.py
|
10
|
6423
|
#
# Copyright (c) 2010-2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Brad Beckmann
#
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
addToPath('../')
from common import Options
from ruby import Ruby
# Get paths we might need.
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
parser = optparse.OptionParser()
Options.addNoISAOptions(parser)
parser.add_option("--maxloads", metavar="N", default=100,
help="Stop after N loads")
parser.add_option("-f", "--wakeup_freq", metavar="N", default=10,
help="Wakeup every N cycles")
parser.add_option("-u", "--num-compute-units", type="int", default=1,
help="number of compute units in the GPU")
parser.add_option("--num-cp", type="int", default=0,
help="Number of GPU Command Processors (CP)")
# not super important now, but to avoid putting the number 4 everywhere, make
# it an option/knob
parser.add_option("--cu-per-sqc", type="int", default=4, help="number of CUs \
sharing an SQC (icache, and thus icache TLB)")
parser.add_option("--simds-per-cu", type="int", default=4, help="SIMD units" \
"per CU")
parser.add_option("--wf-size", type="int", default=64,
help="Wavefront size(in workitems)")
parser.add_option("--wfs-per-simd", type="int", default=10, help="Number of " \
"WF slots per SIMD")
#
# Add the ruby specific and protocol specific options
#
Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py"))
(options, args) = parser.parse_args()
#
# Set the default cache size and associativity to be very small to encourage
# races between requests and writebacks.
#
options.l1d_size="256B"
options.l1i_size="256B"
options.l2_size="512B"
options.l3_size="1kB"
options.l1d_assoc=2
options.l1i_assoc=2
options.l2_assoc=2
options.l3_assoc=2
# This file can support multiple compute units
assert(options.num_compute_units >= 1)
n_cu = options.num_compute_units
options.num_sqc = int((n_cu + options.cu_per_sqc - 1) / options.cu_per_sqc)
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
#
# Create the ruby random tester
#
# Check for the GPU_RfO protocol. Other GPU protocols are non-SC and will
# not work with the Ruby random tester.
assert(buildEnv['PROTOCOL'] == 'GPU_RfO')
# The GPU_RfO protocol does not support cache flushes
check_flush = False
tester = RubyTester(check_flush=check_flush,
checks_to_complete=options.maxloads,
wakeup_frequency=options.wakeup_freq,
deadlock_threshold=1000000)
#
# Create the M5 system. Note that the Memory Object isn't
# actually used by the rubytester, but is included to support the
# M5 memory size == Ruby memory size checks
#
system = System(cpu=tester, mem_ranges=[AddrRange(options.mem_size)])
# Create a top-level voltage domain and clock domain
system.voltage_domain = VoltageDomain(voltage=options.sys_voltage)
system.clk_domain = SrcClockDomain(clock=options.sys_clock,
voltage_domain=system.voltage_domain)
Ruby.create_system(options, False, system)
# Create a separate clock domain for Ruby
system.ruby.clk_domain = SrcClockDomain(clock=options.ruby_clock,
voltage_domain=system.voltage_domain)
tester.num_cpus = len(system.ruby._cpu_ports)
#
# The tester is most effective when randomization is turned on and
# artificial delay is randomly inserted on messages
#
system.ruby.randomization = True
for ruby_port in system.ruby._cpu_ports:
#
# Tie the ruby tester ports to the ruby cpu read and write ports
#
if ruby_port.support_data_reqs and ruby_port.support_inst_reqs:
tester.cpuInstDataPort = ruby_port.slave
elif ruby_port.support_data_reqs:
tester.cpuDataPort = ruby_port.slave
elif ruby_port.support_inst_reqs:
tester.cpuInstPort = ruby_port.slave
# Do not automatically retry stalled Ruby requests
ruby_port.no_retry_on_stall = True
#
# Tell each sequencer this is the ruby tester so that it
# copies the subblock back to the checker
#
ruby_port.using_ruby_tester = True
# -----------------------
# run simulation
# -----------------------
root = Root( full_system = False, system = system )
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
# instantiate configuration
m5.instantiate()
# simulate until program terminates
exit_event = m5.simulate(options.abs_max_tick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
|
bsd-3-clause
|
igrechuhin/omim
|
3party/protobuf/python/google/protobuf/text_encoding.py
|
81
|
4696
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#PY25 compatible for GAE.
#
"""Encoding related utilities."""
import re
import sys ##PY25
# Lookup table for utf8
_cescape_utf8_to_str = [chr(i) for i in xrange(0, 256)]
_cescape_utf8_to_str[9] = r'\t' # optional escape
_cescape_utf8_to_str[10] = r'\n' # optional escape
_cescape_utf8_to_str[13] = r'\r' # optional escape
_cescape_utf8_to_str[39] = r"\'" # optional escape
_cescape_utf8_to_str[34] = r'\"' # necessary escape
_cescape_utf8_to_str[92] = r'\\' # necessary escape
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32)
_cescape_byte_to_str = ([r'\%03o' % i for i in xrange(0, 32)] +
[chr(i) for i in xrange(32, 127)] +
[r'\%03o' % i for i in xrange(127, 256)])
_cescape_byte_to_str[9] = r'\t' # optional escape
_cescape_byte_to_str[10] = r'\n' # optional escape
_cescape_byte_to_str[13] = r'\r' # optional escape
_cescape_byte_to_str[39] = r"\'" # optional escape
_cescape_byte_to_str[34] = r'\"' # necessary escape
_cescape_byte_to_str[92] = r'\\' # necessary escape
def CEscape(text, as_utf8):
"""Escape a bytes string for use in an ascii protocol buffer.
text.encode('string_escape') does not seem to satisfy our needs as it
encodes unprintable characters using two-digit hex escapes whereas our
C++ unescaping function allows hex escapes to be any length. So,
"\0011".encode('string_escape') ends up being "\\x011", which will be
decoded in C++ as a single-character string with char code 0x11.
Args:
text: A byte string to be escaped
as_utf8: Specifies if result should be returned in UTF-8 encoding
Returns:
Escaped string
"""
# PY3 hack: make Ord work for str and bytes:
# //platforms/networking/data uses unicode here, hence basestring.
Ord = ord if isinstance(text, basestring) else lambda x: x
if as_utf8:
return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)
return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] +
[r'\%03o' % i for i in range(127, 256)])
def CUnescape(text):
"""Unescape a text string with C-style escape sequences to UTF-8 bytes."""
def ReplaceHex(m):
# Only replace the match if the number of leading back slashes is odd. i.e.
# the slash itself is not escaped.
if len(m.group(1)) & 1:
return m.group(1) + 'x0' + m.group(2)
return m.group(0)
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
if sys.version_info[0] < 3: ##PY25
##!PY25 if str is bytes: # PY2
return result.decode('string_escape')
result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
return (result.encode('ascii') # Make it bytes to allow decode.
.decode('unicode_escape')
# Make it bytes again to return the proper type.
.encode('raw_unicode_escape'))
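# --- Illustrative sketch, not part of the original module (this file targets
# Python 2, hence `basestring`/`xrange` above). Round-tripping a byte string
# through the two helpers, using the escape tables defined above:
#
#   escaped = CEscape(b'\x00\tdata "quoted"', as_utf8=False)
#   # -> '\\000\\tdata \\"quoted\\"'
#   assert CUnescape(escaped) == b'\x00\tdata "quoted"'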
|
apache-2.0
|
python-acoustics/Sea
|
Sea/adapter/components/Component2DPlate.py
|
2
|
3440
|
"""
Adapter classes for :class:`Sea.model.components.Component2DPlate`
"""
import Sea
from ComponentStructural import ComponentStructural
from ..subsystems import SubsystemStructuralLong, SubsystemStructuralBend, SubsystemStructuralShear
class SubsystemLong(SubsystemStructuralLong, Sea.model.components.Component2DPlate.SubsystemLong):
"""Subsystem for longitudinal waves in a plate.
"""
pass
class SubsystemBend(SubsystemStructuralBend, Sea.model.components.Component2DPlate.SubsystemBend):
"""Subsystem for bending waves in a plate.
"""
pass
class SubsystemShear(SubsystemStructuralShear, Sea.model.components.Component2DPlate.SubsystemShear):
"""Subsysem for shear waves in a plate.
"""
pass
class Component2DPlate(ComponentStructural, Sea.model.components.Component2DPlate.Component2DPlate):
"""Plate structural component.
This adapter describes a :class:`Sea.model.components.Component2DPlate`
"""
name = 'Plate'
description = 'A structural component with wave propagation along two dimensions.'
def __init__(self, obj, system, material, part):
obj.addProperty("App::PropertyFloat", "Length", self.name, "Length of the plate.")
obj.setEditorMode("Length", 1)
obj.addProperty("App::PropertyFloat", "Width", self.name, "Width of the plate.")
obj.setEditorMode("Width", 1)
ComponentStructural.__init__(self, obj, system, material, part)
obj.addProperty("App::PropertyFloat", "Area", self.name, "Area of the plate.")
obj.setEditorMode("Area", 1)
obj.addProperty("App::PropertyFloat", "Thickness", self.name, "Thickness of the plate.")
obj.setEditorMode("Thickness", 1)
obj.addProperty("App::PropertyFloat", "MassPerArea", self.name, "Mass per unit area.")
obj.setEditorMode("MassPerArea", 1)
self.calc_area_and_thickness(obj)
self.SubsystemLong = obj.makeSubsystem(SubsystemLong)
self.SubsystemBend = obj.makeSubsystem(SubsystemBend)
self.SubsystemShear = obj.makeSubsystem(SubsystemShear)
def onChanged(self, obj, prop):
ComponentStructural.onChanged(self, obj, prop)
if prop == 'Area':
obj.Proxy.area = obj.Area
elif prop == 'Thickness':
obj.Proxy.thickness = obj.Thickness
elif prop == 'Length':
obj.Proxy.length = obj.Length
elif prop == 'Width':
obj.Proxy.width = obj.Width
if prop == 'Shape':
box = obj.Shape.BoundBox
dim = [box.XLength, box.YLength, box.ZLength]
smallest = min(dim)
largest = max(dim)
obj.Length = largest
dim.remove(smallest)
dim.remove(largest)
obj.Width = dim[0]
def execute(self, obj):
ComponentStructural.execute(self, obj)
self.calc_area_and_thickness(obj)
obj.MassPerArea = obj.Proxy.mass_per_area
def calc_area_and_thickness(self, obj):
"""
Determine the area and thickness of the plate.
"""
box = obj.Shape.BoundBox
dim = [ box.XLength, box.YLength, box.ZLength ]
obj.Thickness = min(dim)
dim.remove(obj.Thickness)
obj.Area = dim[0] * dim[1]
|
bsd-3-clause
|
jmhsi/justin_tinker
|
data_science/courses/learning_dl_packages/models/research/qa_kg/util/misc.py
|
10
|
2515
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from datetime import datetime
import json
import logging
import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
def prepare_dirs_and_logger(config):
formatter = logging.Formatter('%(asctime)s:%(levelname)s::%(message)s')
logger = logging.getLogger('tensorflow')
for hdlr in logger.handlers:
logger.removeHandler(hdlr)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(tf.logging.INFO)
config.log_dir = os.path.join(config.exp_dir, config.log_dir,
config.train_tag)
config.model_dir = os.path.join(config.exp_dir, config.model_dir,
config.train_tag)
config.output_dir = os.path.join(config.exp_dir, config.output_dir,
config.train_tag)
for path in [
config.log_dir, config.model_dir, config.output_dir
]:
if not os.path.exists(path):
os.makedirs(path)
config.data_files = {
'train': os.path.join(config.data_dir, config.train_data_file),
'dev': os.path.join(config.data_dir, config.dev_data_file),
'test': os.path.join(config.data_dir, config.test_data_file)
}
return config
def get_time():
return datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def save_config(config):
param_path = os.path.join(config.model_dir, 'params.json')
tf.logging.info('log dir: %s' % config.log_dir)
tf.logging.info('model dir: %s' % config.model_dir)
tf.logging.info('param path: %s' % param_path)
tf.logging.info('output dir: %s' % config.output_dir)
with open(param_path, 'w') as f:
f.write(json.dumps(config.__dict__, indent=4, sort_keys=True))
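# --- Illustrative sketch, not part of the original module. The helpers above
# only assume a config object exposing the attributes they read, so a bare
# argparse.Namespace is enough for a quick smoke test (all paths hypothetical):
#
#   from argparse import Namespace
#   config = Namespace(exp_dir='exp', log_dir='logs', model_dir='models',
#                      output_dir='out', train_tag='run1', data_dir='data',
#                      train_data_file='train.json', dev_data_file='dev.json',
#                      test_data_file='test.json')
#   config = prepare_dirs_and_logger(config)   # creates the directories, sets up logging
#   save_config(config)                        # writes params.json to model_dir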
|
apache-2.0
|
Hydriz/Balchivist
|
balchivist/archiver.py
|
1
|
9178
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2015-2018 Hydriz Scholz
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import internetarchive
from . import BALVERSION
import common
from exception import IncorrectUsage
import message
class BALArchiver(object):
def __init__(self, identifier='', retries=3, debug=False, verbose=False):
"""
This module is used for providing regular functions used for
uploading files into the Internet Archive. It is an extension of
the internetarchive python library, but with better error handling.
- identifier (string): The identifier for the item.
- retries (int): The number of times to retry a request to the server.
- debug (boolean): Whether or not to provide debugging output.
- verbose (boolean): Whether or not to provide more verbosity.
"""
self.retries = retries
self.identifier = identifier
self.debug = debug
self.verbose = verbose
self.common = common.BALCommon(debug=debug, verbose=verbose)
# Files that are present by default in all Internet Archive items
self.defaultFiles = [
'%s_archive.torrent' % (identifier),
'%s_files.xml' % (identifier),
'%s_meta.sqlite' % (identifier),
'%s_meta.xml' % (identifier)
]
def handleException(self, exception):
"""
This function is for handling exceptions caught when making a request
to the Internet Archive.
- exception (object): The exception object caught.
"""
msg = "%s was caught" % (type(exception).__name__)
self.common.giveDebugMessage(msg)
def getFileList(self):
"""
This function is used to get the list of files in an item and excludes
the default files that are present in all Internet Archive items.
Returns: List of files in the item excluding default files in
alphabetical order. False if an error has occurred.
"""
tries = 0
while tries < self.retries:
try:
iaitem = internetarchive.get_item(identifier=self.identifier)
break
except Exception as exception:
self.handleException(exception=exception)
if tries == self.retries:
return False
else:
tries += 1
time.sleep(60*tries)
filelist = []
for thefile in iaitem.files:
filename = thefile['name']
if filename in self.defaultFiles:
continue
else:
filelist.append(filename)
return sorted(filelist)
def getMd5Sums(self, dumpfile):
"""
This function will get the md5sums for a given file in the item on the
Internet Archive.
- dumpfile (string): The name of the file to get the md5sums for.
Returns: String with the md5sums.
"""
tries = 0
while tries < self.retries:
try:
iaitem = internetarchive.get_item(identifier=self.identifier)
break
except Exception as exception:
self.handleException(exception=exception)
if tries == self.retries:
return False
else:
tries += 1
time.sleep(60*tries)
thefile = iaitem.get_files(dumpfile)
return thefile.md5
def uploadFile(self, body, metadata={}, headers={}, queuederive=False,
verify=True):
"""
This function will upload a single file to the item on the Internet
Archive.
- body (string or list): The path to the file(s) to upload.
- metadata (dict): The metadata for the Internet Archive item.
- headers (dict): The headers to send when sending the request.
- queuederive (boolean): Whether or not to derive the item after the
file is uploaded.
- verify (boolean): Whether or not to verify that the file is uploaded.
Returns: True if the file is successfully uploaded, False if errors
are encountered.
TODO: Implement multipart uploading.
"""
if not metadata.get('scanner'):
scanner = 'Balchivist Python Library %s' % (BALVERSION)
metadata['scanner'] = scanner
tries = 0
iaupload = internetarchive.upload
while tries < self.retries:
try:
iaupload(identifier=self.identifier, files=body,
metadata=metadata, headers=headers,
queue_derive=queuederive, verbose=self.verbose,
verify=verify, debug=self.debug, retries=self.retries)
return True
except Exception as exception:
self.handleException(exception=exception)
if tries == self.retries:
return False
else:
tries += 1
time.sleep(60*tries)
def modifyMetadata(self, metadata, target='metadata', append=False,
priority=None):
"""
This function will modify the metadata of an item on the Internet
Archive.
- metadata (dict): The metadata to modify for the item.
- target (string): The metadata target to update.
- append (boolean): Whether or not to append the metadata values to the
current values instead of replacing them.
- priority (int): The priority for the metadata update task.
Returns: True if the modification is successful, False if otherwise.
"""
if not metadata.get('scanner'):
scanner = 'Balchivist Python Library %s' % (BALVERSION)
metadata['scanner'] = scanner
tries = 0
iamodifymd = internetarchive.modify_metadata
while tries < self.retries:
try:
iamodifymd(identifier=self.identifier, metadata=metadata,
target=target, append=append, priority=priority,
debug=self.debug)
return True
except Exception as exception:
self.handleException(exception=exception)
if tries == self.retries:
return False
else:
tries += 1
time.sleep(60*tries)
def upload(self, body, metadata={}, headers={}, queuederive=False,
verify=True):
"""
This function acts as a wrapper for the uploadFile function, but adds
additional functionality to ensure better error handling.
- body (string or list): The path to the file(s) to upload.
- metadata (dict): The metadata for the Internet Archive item.
- headers (dict): The headers to send when sending the request.
- queuederive (boolean): Whether or not to derive the item after the
file is uploaded.
- verify (boolean): Whether or not to verify that the file is uploaded.
Returns: True if process is successful, False if otherwise.
"""
count = 0
for dumpfile in body:
self.common.giveMessage("Uploading file: %s" % (dumpfile))
time.sleep(1) # For Ctrl+C
if count == 0:
upload = self.uploadFile(dumpfile, metadata=metadata,
headers=headers, verify=verify,
queuederive=queuederive)
# Allow the Internet Archive to process the item creation
if self.debug:
pass
else:
timenow = time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime())
self.common.giveMessage("Sleeping for 30 seconds, %s" %
(timenow))
time.sleep(30)
else:
upload = self.uploadFile(dumpfile, queuederive=queuederive,
verify=verify)
if upload:
self.common.giveDebugMessage(upload)
count += 1
else:
return False
return True
if __name__ == '__main__':
BALMessage = message.BALMessage()
raise IncorrectUsage(BALMessage.getMessage('exception-incorrectusage'))
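# --- Illustrative sketch, not part of the original module. Typical use of
# BALArchiver, assuming the `internetarchive` library is configured with valid
# credentials; the item identifier and file names are hypothetical.
#
#   archiver = BALArchiver(identifier='example-dump-item', retries=3,
#                          verbose=True)
#   ok = archiver.upload(['dump-part1.xml.gz', 'dump-part2.xml.gz'],
#                        metadata={'title': 'Example dump'})
#   if ok:
#       print(archiver.getFileList())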
|
gpl-3.0
|
rickerc/nova_audit
|
nova/tests/test_crypto.py
|
33
|
10137
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Crypto module.
"""
import os
import mox
from nova import crypto
from nova import db
from nova import exception
from nova.openstack.common import processutils
from nova import test
from nova import utils
class X509Test(test.TestCase):
def test_can_generate_x509(self):
with utils.tempdir() as tmpdir:
self.flags(ca_path=tmpdir)
crypto.ensure_ca_filesystem()
_key, cert_str = crypto.generate_x509_cert('fake', 'fake')
project_cert = crypto.fetch_ca(project_id='fake')
signed_cert_file = os.path.join(tmpdir, "signed")
with open(signed_cert_file, 'w') as keyfile:
keyfile.write(cert_str)
project_cert_file = os.path.join(tmpdir, "project")
with open(project_cert_file, 'w') as keyfile:
keyfile.write(project_cert)
enc, err = utils.execute('openssl', 'verify', '-CAfile',
project_cert_file, '-verbose', signed_cert_file)
self.assertFalse(err)
def test_encrypt_decrypt_x509(self):
with utils.tempdir() as tmpdir:
self.flags(ca_path=tmpdir)
project_id = "fake"
crypto.ensure_ca_filesystem()
cert = crypto.fetch_ca(project_id)
public_key = os.path.join(tmpdir, "public.pem")
with open(public_key, 'w') as keyfile:
keyfile.write(cert)
text = "some @#!%^* test text"
enc, _err = utils.execute('openssl',
'rsautl',
'-certin',
'-encrypt',
'-inkey', '%s' % public_key,
process_input=text)
dec = crypto.decrypt_text(project_id, enc)
self.assertEqual(text, dec)
class RevokeCertsTest(test.TestCase):
def test_revoke_certs_by_user_and_project(self):
user_id = 'test_user'
project_id = 2
file_name = 'test_file'
def mock_certificate_get_all_by_user_and_project(context,
user_id,
project_id):
return [{"user_id": user_id, "project_id": project_id,
"file_name": file_name}]
self.stubs.Set(db, 'certificate_get_all_by_user_and_project',
mock_certificate_get_all_by_user_and_project)
self.mox.StubOutWithMock(crypto, 'revoke_cert')
crypto.revoke_cert(project_id, file_name)
self.mox.ReplayAll()
crypto.revoke_certs_by_user_and_project(user_id, project_id)
def test_revoke_certs_by_user(self):
user_id = 'test_user'
project_id = 2
file_name = 'test_file'
def mock_certificate_get_all_by_user(context, user_id):
return [{"user_id": user_id, "project_id": project_id,
"file_name": file_name}]
self.stubs.Set(db, 'certificate_get_all_by_user',
mock_certificate_get_all_by_user)
self.mox.StubOutWithMock(crypto, 'revoke_cert')
crypto.revoke_cert(project_id, mox.IgnoreArg())
self.mox.ReplayAll()
crypto.revoke_certs_by_user(user_id)
def test_revoke_certs_by_project(self):
user_id = 'test_user'
project_id = 2
file_name = 'test_file'
def mock_certificate_get_all_by_project(context, project_id):
return [{"user_id": user_id, "project_id": project_id,
"file_name": file_name}]
self.stubs.Set(db, 'certificate_get_all_by_project',
mock_certificate_get_all_by_project)
self.mox.StubOutWithMock(crypto, 'revoke_cert')
crypto.revoke_cert(project_id, mox.IgnoreArg())
self.mox.ReplayAll()
crypto.revoke_certs_by_project(project_id)
class CertExceptionTests(test.TestCase):
def test_fetch_ca_file_not_found(self):
with utils.tempdir() as tmpdir:
self.flags(ca_path=tmpdir)
self.flags(use_project_ca=True)
self.assertRaises(exception.CryptoCAFileNotFound, crypto.fetch_ca,
project_id='fake')
def test_fetch_crl_file_not_found(self):
with utils.tempdir() as tmpdir:
self.flags(ca_path=tmpdir)
self.flags(use_project_ca=True)
self.assertRaises(exception.CryptoCRLFileNotFound,
crypto.fetch_crl, project_id='fake')
class EncryptionTests(test.TestCase):
pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArtgrfBu/g2o28o+H2ng/crv"
"zgES91i/NNPPFTOutXelrJ9QiPTPTm+B8yspLsXifmbsmXztNOlBQgQXs6usxb4"
"fnJKNUZ84Vkp5esbqK/L7eyRqwPvqo7btKBMoAMVX/kUyojMpxb7Ssh6M6Y8cpi"
"goi+MSDPD7+5yRJ9z4mH9h7MCY6Ejv8KTcNYmVHvRhsFUcVhWcIISlNWUGiG7rf"
"oki060F5myQN3AXcL8gHG5/Qb1RVkQFUKZ5geQ39/wSyYA1Q65QTba/5G2QNbl2"
"0eAIBTyKZhN6g88ak+yARa6BLLDkrlP7L4WctHQMLsuXHohQsUO9AcOlVMARgrg"
"uF test@test")
prikey = """-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAwK7YK3wbv4NqNvKPh9p4P3K784BEvdYvzTTzxUzrrV3payfU
Ij0z05vgfMrKS7F4n5m7Jl87TTpQUIEF7OrrMW+H5ySjVGfOFZKeXrG6ivy+3ska
sD76qO27SgTKADFV/5FMqIzKcW+0rIejOmPHKYoKIvjEgzw+/uckSfc+Jh/YezAm
OhI7/Ck3DWJlR70YbBVHFYVnCCEpTVlBohu636JItOtBeZskDdwF3C/IBxuf0G9U
VZEBVCmeYHkN/f8EsmANUOuUE22v+RtkDW5dtHgCAU8imYTeoPPGpPsgEWugSyw5
K5T+y+FnLR0DC7Llx6IULFDvQHDpVTAEYK4LhQIDAQABAoIBAF9ibrrgHnBpItx+
qVUMbriiGK8LUXxUmqdQTljeolDZi6KzPc2RVKWtpazBSvG7skX3+XCediHd+0JP
DNri1HlNiA6B0aUIGjoNsf6YpwsE4YwyK9cR5k5YGX4j7se3pKX2jOdngxQyw1Mh
dkmCeWZz4l67nbSFz32qeQlwrsB56THJjgHB7elDoGCXTX/9VJyjFlCbfxVCsIng
inrNgT0uMSYMNpAjTNOjguJt/DtXpwzei5eVpsERe0TRRVH23ycS0fuq/ancYwI/
MDr9KSB8r+OVGeVGj3popCxECxYLBxhqS1dAQyJjhQXKwajJdHFzidjXO09hLBBz
FiutpYUCgYEA6OFikTrPlCMGMJjSj+R9woDAOPfvCDbVZWfNo8iupiECvei88W28
RYFnvUQRjSC0pHe//mfUSmiEaE+SjkNCdnNR+vsq9q+htfrADm84jl1mfeWatg/g
zuGz2hAcZnux3kQMI7ufOwZNNpM2bf5B4yKamvG8tZRRxSkkAL1NV48CgYEA08/Z
Ty9g9XPKoLnUWStDh1zwG+c0q14l2giegxzaUAG5DOgOXbXcw0VQ++uOWD5ARELG
g9wZcbBsXxJrRpUqx+GAlv2Y1bkgiPQS1JIyhsWEUtwfAC/G+uZhCX53aI3Pbsjh
QmkPCSp5DuOuW2PybMaw+wVe+CaI/gwAWMYDAasCgYEA4Fzkvc7PVoU33XIeywr0
LoQkrb4QyPUrOvt7H6SkvuFm5thn0KJMlRpLfAksb69m2l2U1+HooZd4mZawN+eN
DNmlzgxWJDypq83dYwq8jkxmBj1DhMxfZnIE+L403nelseIVYAfPLOqxUTcbZXVk
vRQFp+nmSXqQHUe5rAy1ivkCgYEAqLu7cclchCxqDv/6mc5NTVhMLu5QlvO5U6fq
HqitgW7d69oxF5X499YQXZ+ZFdMBf19ypTiBTIAu1M3nh6LtIa4SsjXzus5vjKpj
FdQhTBus/hU83Pkymk1MoDOPDEtsI+UDDdSDldmv9pyKGWPVi7H86vusXCLWnwsQ
e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap
6S1ziw+XWfdQ83VIUOCL5DrwmcYzLIogS0agmnx/monfDx0Nl9+OZRxy6+AI9vkK
86A1+DXdo+IgX3grFK1l1gPhAZPRWJZ+anrEkyR4iLq6ZoPZ3BQn97U=
-----END RSA PRIVATE KEY-----"""
text = "Some text! %$*"
def _ssh_decrypt_text(self, ssh_private_key, text):
with utils.tempdir() as tmpdir:
sshkey = os.path.abspath(os.path.join(tmpdir, 'ssh.key'))
with open(sshkey, 'w') as f:
f.write(ssh_private_key)
try:
dec, _err = utils.execute('openssl',
'rsautl',
'-decrypt',
'-inkey', sshkey,
process_input=text)
return dec
except processutils.ProcessExecutionError as exc:
raise exception.DecryptionFailure(reason=exc.stderr)
def test_ssh_encrypt_decrypt_text(self):
enc = crypto.ssh_encrypt_text(self.pubkey, self.text)
self.assertNotEqual(enc, self.text)
result = self._ssh_decrypt_text(self.prikey, enc)
self.assertEqual(result, self.text)
def test_ssh_encrypt_failure(self):
self.assertRaises(exception.EncryptionFailure,
crypto.ssh_encrypt_text, '', self.text)
class ConversionTests(test.TestCase):
k1 = ("ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4CqmrxfU7x4sJrubpMNxeglul+d"
"ByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6LH/UR/c"
"27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrCTRVeo"
"9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbCivfF"
"LQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQMSF"
"r35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4lQ"
"== test@test")
k2 = ("-----BEGIN PUBLIC KEY-----\n"
"MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA4CqmrxfU7x4sJrubpMNx\n"
"eglul+dByrsicnvQcHDEjPzdvoz+BaoAG9bjCA5mCeTBIISsVTVXz/hxNeiuBV6L\n"
"H/UR/c27yl53ypN+821ImoexQZcKItdnjJ3gVZlDob1f9+1qDVy63NJ1c+TstkrC\n"
"TRVeo9VyE7RpdSS4UCiBe8Xwk3RkedioFxePrI0Ktc2uASw2G0G2Rl7RN7KZOJbC\n"
"ivfFLQMAOu6e+7fYvuE1gxGHHj7dxaBY/ioGOm1W4JmQ1V7AKt19zTBlZKduN8FQ\n"
"MSFr35CDlvoWs0+OP8nwlebKNCi/5sdL8qiSLrAcPB4LqdkAf/blNSVA2Yl83/c4\n"
"lQIBIw==\n"
"-----END PUBLIC KEY-----\n")
def test_convert_keys(self):
result = crypto.convert_from_sshrsa_to_pkcs8(self.k1)
self.assertEqual(result, self.k2)
def test_convert_failure(self):
self.assertRaises(exception.EncryptionFailure,
crypto.convert_from_sshrsa_to_pkcs8, '')
|
apache-2.0
|
yawnosnorous/python-for-android
|
python-build/python-libs/xmpppy/doc/examples/bot.py
|
87
|
3135
|
#!/usr/bin/python
# -*- coding: koi8-r -*-
# $Id: bot.py,v 1.2 2006/10/06 12:30:42 normanr Exp $
import sys
import xmpp
commands={}
i18n={'ru':{},'en':{}}
########################### user handlers start ##################################
i18n['en']['HELP']="This is example jabber bot.\nAvailable commands: %s"
def helpHandler(user,command,args,mess):
lst=commands.keys()
lst.sort()
return "HELP",', '.join(lst)
i18n['en']['EMPTY']="%s"
i18n['en']['HOOK1']='Response 1: %s'
def hook1Handler(user,command,args,mess):
return "HOOK1",'You requested: %s'%args
i18n['en']['HOOK2']='Response 2: %s'
def hook2Handler(user,command,args,mess):
return "HOOK2","hook2 called with %s"%(`(user,command,args,mess)`)
i18n['en']['HOOK3']='Response 3: static string'
def hook3Handler(user,command,args,mess):
return "HOOK3"*int(args)
########################### user handlers stop ###################################
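# --- Illustrative sketch, not part of the original example. A new command is
# added by defining an i18n pattern plus a *Handler function in the section
# above; the auto-registration loop further down picks it up by name. The
# "echo" command here is hypothetical.
#
#   i18n['en']['ECHO'] = 'Echo: %s'
#   def echoHandler(user, command, args, mess):
#       return "ECHO", args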
############################ bot logic start #####################################
i18n['en']["UNKNOWN COMMAND"]='Unknown command "%s". Try "help"'
i18n['en']["UNKNOWN USER"]="I do not know you. Register first."
def messageCB(conn,mess):
text=mess.getBody()
user=mess.getFrom()
user.lang='en' # dup
if text.find(' ')+1: command,args=text.split(' ',1)
else: command,args=text,''
cmd=command.lower()
if commands.has_key(cmd): reply=commands[cmd](user,command,args,mess)
else: reply=("UNKNOWN COMMAND",cmd)
if type(reply)==type(()):
key,args=reply
if i18n[user.lang].has_key(key): pat=i18n[user.lang][key]
elif i18n['en'].has_key(key): pat=i18n['en'][key]
else: pat="%s"
if type(pat)==type(''): reply=pat%args
else: reply=pat(**args)
else:
try: reply=i18n[user.lang][reply]
except KeyError:
try: reply=i18n['en'][reply]
except KeyError: pass
if reply: conn.send(xmpp.Message(mess.getFrom(),reply))
for i in globals().keys():
if i[-7:]=='Handler' and i[:-7].lower()==i[:-7]: commands[i[:-7]]=globals()[i]
############################# bot logic stop #####################################
def StepOn(conn):
try:
conn.Process(1)
except KeyboardInterrupt: return 0
return 1
def GoOn(conn):
while StepOn(conn): pass
if len(sys.argv)<3:
print "Usage: bot.py [email protected] password"
else:
jid=xmpp.JID(sys.argv[1])
user,server,password=jid.getNode(),jid.getDomain(),sys.argv[2]
conn=xmpp.Client(server)#,debug=[])
conres=conn.connect()
if not conres:
print "Unable to connect to server %s!"%server
sys.exit(1)
if conres<>'tls':
print "Warning: unable to estabilish secure connection - TLS failed!"
authres=conn.auth(user,password)
if not authres:
print "Unable to authorize on %s - check login/password."%server
sys.exit(1)
if authres<>'sasl':
print "Warning: unable to perform SASL auth os %s. Old authentication method used!"%server
conn.RegisterHandler('message',messageCB)
conn.sendInitPresence()
print "Bot started."
GoOn(conn)
|
apache-2.0
|
arnaud-morvan/QGIS
|
python/plugins/processing/algs/gdal/Dissolve.py
|
2
|
8511
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
Dissolve.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Giovanni Manghi
Email : giovanni dot manghi at naturalgis dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giovanni Manghi'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Giovanni Manghi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingParameterVectorDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
class Dissolve(GdalAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
GEOMETRY = 'GEOMETRY'
EXPLODE_COLLECTIONS = 'EXPLODE_COLLECTIONS'
KEEP_ATTRIBUTES = 'KEEP_ATTRIBUTES'
COUNT_FEATURES = 'COUNT_FEATURES'
COMPUTE_AREA = 'COMPUTE_AREA'
COMPUTE_STATISTICS = 'COMPUTE_STATISTICS'
STATISTICS_ATTRIBUTE = 'STATISTICS_ATTRIBUTE'
OPTIONS = 'OPTIONS'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Dissolve field'),
None,
self.INPUT,
QgsProcessingParameterField.Any, optional=True))
self.addParameter(QgsProcessingParameterString(self.GEOMETRY,
self.tr('Geometry column name'),
defaultValue='geometry'))
params = []
params.append(QgsProcessingParameterBoolean(self.EXPLODE_COLLECTIONS,
self.tr('Produce one feature for each geometry in any kind of geometry collection in the source file'),
defaultValue=False))
params.append(QgsProcessingParameterBoolean(self.KEEP_ATTRIBUTES,
self.tr('Keep input attributes'),
defaultValue=False))
params.append(QgsProcessingParameterBoolean(self.COUNT_FEATURES,
self.tr('Count dissolved features'),
defaultValue=False))
params.append(QgsProcessingParameterBoolean(self.COMPUTE_AREA,
self.tr('Compute area and perimeter of dissolved features'),
defaultValue=False))
params.append(QgsProcessingParameterBoolean(self.COMPUTE_STATISTICS,
self.tr('Compute min/max/sum/mean for attribute'),
defaultValue=False))
params.append(QgsProcessingParameterField(self.STATISTICS_ATTRIBUTE,
self.tr('Numeric attribute to calculate statistics on'),
None,
self.INPUT,
QgsProcessingParameterField.Numeric,
optional=True))
params.append(QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation options'),
defaultValue='',
optional=True))
for param in params:
param.setFlags(param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(param)
self.addParameter(QgsProcessingParameterVectorDestination(self.OUTPUT,
self.tr('Dissolved')))
def name(self):
return 'dissolve'
def displayName(self):
return self.tr('Dissolve')
def group(self):
return self.tr('Vector geoprocessing')
def groupId(self):
return 'vectorgeoprocessing'
def commandName(self):
return 'ogr2ogr'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
fields = source.fields()
ogrLayer, layerName = self.getOgrCompatibleSource(self.INPUT, parameters, context, feedback, executing)
geometry = self.parameterAsString(parameters, self.GEOMETRY, context)
fieldName = self.parameterAsString(parameters, self.FIELD, context)
options = self.parameterAsString(parameters, self.OPTIONS, context)
outFile = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
output, outputFormat = GdalUtils.ogrConnectionStringAndFormat(outFile, context)
other_fields = []
for f in fields:
if f.name() == geometry:
continue
other_fields.append(f.name())
if other_fields:
other_fields = ',*'
else:
other_fields = ''
arguments = []
arguments.append(output)
arguments.append(ogrLayer)
arguments.append('-dialect')
arguments.append('sqlite')
arguments.append('-sql')
tokens = []
if self.parameterAsBool(parameters, self.COUNT_FEATURES, context):
tokens.append("COUNT({}) AS count".format(geometry))
if self.parameterAsBool(parameters, self.COMPUTE_AREA, context):
tokens.append("SUM(ST_Area({0})) AS area, ST_Perimeter(ST_Union({0})) AS perimeter".format(geometry))
statsField = self.parameterAsString(parameters, self.STATISTICS_ATTRIBUTE, context)
if statsField and self.parameterAsBool(parameters, self.COMPUTE_STATISTICS, context):
tokens.append("SUM({0}) AS sum, MIN({0}) AS min, MAX({0}) AS max, AVG({0}) AS avg".format(statsField))
params = ','.join(tokens)
if params:
params = ', ' + params
group_by = ''
if fieldName:
group_by = ' GROUP BY {}'.format(fieldName)
if self.parameterAsBool(parameters, self.KEEP_ATTRIBUTES, context):
sql = "SELECT ST_Union({}) AS {}{}{} FROM '{}'{}".format(geometry, geometry, other_fields, params, layerName, group_by)
else:
sql = "SELECT ST_Union({}) AS {}{}{} FROM '{}'{}".format(geometry, geometry, ', ' + fieldName if fieldName else '',
params, layerName, group_by)
arguments.append(sql)
if self.parameterAsBool(parameters, self.EXPLODE_COLLECTIONS, context):
arguments.append('-explodecollections')
if options:
arguments.append(options)
if outputFormat:
arguments.append('-f {}'.format(outputFormat))
return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
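# --- Illustrative sketch, not part of the algorithm class. With only a
# dissolve field set (and KEEP_ATTRIBUTES off), getConsoleCommands() above
# assembles a command along these lines; paths, layer and field names are
# hypothetical:
#
#   ogr2ogr out.gpkg input.shp -dialect sqlite
#     -sql "SELECT ST_Union(geometry) AS geometry, my_field FROM 'input' GROUP BY my_field"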
|
gpl-2.0
|
nikhilprathapani/python-for-android
|
python3-alpha/python3-src/Lib/socket.py
|
46
|
13835
|
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EINTR = getattr(errno, 'EINTR', 4)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
class socket(_socket.socket):
"""A subclass of _socket.socket adding the makefile() method."""
__slots__ = ["__weakref__", "_io_refs", "_closed"]
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
_socket.socket.__init__(self, family, type, proto, fileno)
self._io_refs = 0
self._closed = False
def __enter__(self):
return self
def __exit__(self, *args):
if not self._closed:
self.close()
def __repr__(self):
"""Wrap __repr__() to reveal the real class name."""
s = _socket.socket.__repr__(self)
if s.startswith("<socket object"):
s = "<%s.%s%s%s" % (self.__class__.__module__,
self.__class__.__name__,
getattr(self, '_closed', False) and " [closed] " or "",
s[7:])
return s
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource.
"""
fd = dup(self.fileno())
sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
sock.settimeout(self.gettimeout())
return sock
def accept(self):
"""accept() -> (socket object, address info)
Wait for an incoming connection. Return a new socket
representing the connection, and the address of the client.
For IP sockets, the address info is a pair (hostaddr, port).
"""
fd, addr = self._accept()
sock = socket(self.family, self.type, self.proto, fileno=fd)
# Issue #7995: if no default timeout is set and the listening
# socket had a (non-zero) timeout, force the new socket in blocking
# mode to override platform-specific socket flags inheritance.
if getdefaulttimeout() is None and self.gettimeout():
sock.setblocking(True)
return sock, addr
def makefile(self, mode="r", buffering=None, *,
encoding=None, errors=None, newline=None):
"""makefile(...) -> an I/O stream connected to the socket
The arguments are as for io.open() after the filename,
except the only mode characters supported are 'r', 'w' and 'b'.
The semantics are similar too. (XXX refactor to share code?)
"""
for c in mode:
if c not in {"r", "w", "b"}:
raise ValueError("invalid mode %r (only r, w, b allowed)")
writing = "w" in mode
reading = "r" in mode or not writing
assert reading or writing
binary = "b" in mode
rawmode = ""
if reading:
rawmode += "r"
if writing:
rawmode += "w"
raw = SocketIO(self, rawmode)
self._io_refs += 1
if buffering is None:
buffering = -1
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
if not binary:
raise ValueError("unbuffered streams must be binary")
return raw
if reading and writing:
buffer = io.BufferedRWPair(raw, raw, buffering)
elif reading:
buffer = io.BufferedReader(raw, buffering)
else:
assert writing
buffer = io.BufferedWriter(raw, buffering)
if binary:
return buffer
text = io.TextIOWrapper(buffer, encoding, errors, newline)
text.mode = mode
return text
def _decref_socketios(self):
if self._io_refs > 0:
self._io_refs -= 1
if self._closed:
self.close()
def _real_close(self, _ss=_socket.socket):
# This function should not reference any globals. See issue #808164.
_ss.close(self)
def close(self):
# This function should not reference any globals. See issue #808164.
self._closed = True
if self._io_refs <= 0:
self._real_close()
def fromfd(fd, family, type, proto=0):
""" fromfd(fd, family, type[, proto]) -> socket object
Create a socket object from a duplicate of the given file
descriptor. The remaining arguments are the same as for socket().
"""
nfd = dup(fd)
return socket(family, type, proto, nfd)
if hasattr(_socket, "socketpair"):
def socketpair(family=None, type=SOCK_STREAM, proto=0):
"""socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is
AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
"""
if family is None:
try:
family = AF_UNIX
except NameError:
family = AF_INET
a, b = _socket.socketpair(family, type, proto)
a = socket(family, type, proto, a.detach())
b = socket(family, type, proto, b.detach())
return a, b
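# Errno values that signal "no data available right now" on a non-blocking
# socket; SocketIO.readinto() and SocketIO.write() below map these to a None
# return value instead of raising, following the RawIOBase convention.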
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
"""Raw I/O implementation for stream sockets.
This class supports the makefile() method on sockets. It provides
the raw I/O interface on top of a socket object.
"""
# One might wonder why not let FileIO do the job instead. There are two
# main reasons why FileIO is not adapted:
# - it wouldn't work under Windows (where you can't use read() and
# write() on a socket handle)
# - it wouldn't work with socket timeouts (FileIO would ignore the
# timeout and consider the socket non-blocking)
# XXX More docs
def __init__(self, sock, mode):
if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
raise ValueError("invalid mode: %r" % mode)
io.RawIOBase.__init__(self)
self._sock = sock
if "b" not in mode:
mode += "b"
self._mode = mode
self._reading = "r" in mode
self._writing = "w" in mode
self._timeout_occurred = False
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise IOError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
except error as e:
n = e.args[0]
if n == EINTR:
continue
if n in _blocking_errnos:
return None
raise
def write(self, b):
"""Write the given bytes or bytearray object *b* to the socket
and return the number of bytes written. This can be less than
len(b) if not all data could be written. If the socket is
non-blocking and no bytes could be written None is returned.
"""
self._checkClosed()
self._checkWritable()
try:
return self._sock.send(b)
except error as e:
# XXX what about EINTR?
if e.args[0] in _blocking_errnos:
return None
raise
def readable(self):
"""True if the SocketIO is open for reading.
"""
return self._reading and not self.closed
def writable(self):
"""True if the SocketIO is open for writing.
"""
return self._writing and not self.closed
def fileno(self):
"""Return the file descriptor of the underlying socket.
"""
self._checkClosed()
return self._sock.fileno()
@property
def name(self):
if not self.closed:
return self.fileno()
else:
return -1
@property
def mode(self):
return self._mode
def close(self):
"""Close the SocketIO object. This doesn't close the underlying
socket, except if all references to it have disappeared.
"""
if self.closed:
return
io.RawIOBase.close(self)
self._sock._decref_socketios()
self._sock = None
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
|
apache-2.0
|
dakerfp/AutobahnPython
|
autobahn/autobahn/wamp1/protocol.py
|
17
|
82907
|
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
import sys
PY3 = sys.version_info >= (3,)
__all__ = ("WampProtocol",
"WampFactory",
"WampServerProtocol",
"WampServerFactory",
"WampClientProtocol",
"WampClientFactory",
"WampCraProtocol",
"WampCraClientProtocol",
"WampCraServerProtocol",
"json_lib",
"json_loads",
"json_dumps",)
import inspect, types
import traceback
if PY3:
from io import StringIO
else:
import StringIO
import hashlib, hmac, binascii, random
from twisted.python import log
from twisted.internet.defer import Deferred, \
maybeDeferred
from autobahn import __version__
from autobahn.websocket.protocol import WebSocketProtocol, \
Timings
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory, \
WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.wamp1.pbkdf2 import pbkdf2_bin
from autobahn.wamp1.prefixmap import PrefixMap
from autobahn.util import utcnow, newid
def exportRpc(arg = None):
"""
Decorator for RPC'ed callables.
"""
## decorator without argument
if type(arg) is types.FunctionType:
arg._autobahn_rpc_id = arg.__name__
return arg
## decorator with argument
else:
def inner(f):
f._autobahn_rpc_id = arg
return f
return inner
def exportSub(arg, prefixMatch = False):
"""
Decorator for subscription handlers.
"""
def inner(f):
f._autobahn_sub_id = arg
f._autobahn_sub_prefix_match = prefixMatch
return f
return inner
def exportPub(arg, prefixMatch = False):
"""
Decorator for publication handlers.
"""
def inner(f):
f._autobahn_pub_id = arg
f._autobahn_pub_prefix_match = prefixMatch
return f
return inner
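## Editor's illustrative sketch (not part of the original source): a hypothetical
## service class exposing methods through the decorators defined above. An instance
## of such a class would later be registered on a protocol with registerForRpc()
## and registerHandlerForPubSub().
class _ExampleService:

   @exportRpc("add")
   def add(self, x, y):
      return x + y

   @exportSub("update", prefixMatch = True)
   def onSubscribe(self, topicUriPrefix, topicUriSuffix):
      ## returning True allows the subscription
      return True

   @exportPub("update", prefixMatch = True)
   def onPublish(self, topicUriPrefix, topicUriSuffix, event):
      ## returning the event (possibly modified) lets it be dispatched
      return event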
class WampProtocol:
"""
WAMP protocol base class. Mixin for WampServerProtocol and WampClientProtocol.
"""
URI_WAMP_BASE = "http://api.wamp.ws/"
"""
WAMP base URI for WAMP predefined things.
"""
URI_WAMP_ERROR = URI_WAMP_BASE + "error#"
"""
Prefix for WAMP errors.
"""
URI_WAMP_PROCEDURE = URI_WAMP_BASE + "procedure#"
"""
Prefix for WAMP predefined RPC endpoints.
"""
URI_WAMP_TOPIC = URI_WAMP_BASE + "topic#"
"""
Prefix for WAMP predefined PubSub topics.
"""
URI_WAMP_ERROR_GENERIC = URI_WAMP_ERROR + "generic"
"""
WAMP error URI for generic errors.
"""
DESC_WAMP_ERROR_GENERIC = "generic error"
"""
Description for WAMP generic errors.
"""
URI_WAMP_ERROR_INTERNAL = URI_WAMP_ERROR + "internal"
"""
WAMP error URI for internal errors.
"""
DESC_WAMP_ERROR_INTERNAL = "internal error"
"""
Description for WAMP internal errors.
"""
URI_WAMP_ERROR_NO_SUCH_RPC_ENDPOINT = URI_WAMP_ERROR + "NoSuchRPCEndpoint"
"""
WAMP error URI for RPC endpoint not found.
"""
WAMP_PROTOCOL_VERSION = 1
"""
WAMP version this server speaks. Versions are numbered consecutively
(integers, no gaps).
"""
MESSAGE_TYPEID_WELCOME = 0
"""
Server-to-client welcome message containing session ID.
"""
MESSAGE_TYPEID_PREFIX = 1
"""
Client-to-server message establishing a URI prefix to be used in CURIEs.
"""
MESSAGE_TYPEID_CALL = 2
"""
Client-to-server message initiating an RPC.
"""
MESSAGE_TYPEID_CALL_RESULT = 3
"""
Server-to-client message returning the result of a successful RPC.
"""
MESSAGE_TYPEID_CALL_ERROR = 4
"""
Server-to-client message returning the error of a failed RPC.
"""
MESSAGE_TYPEID_SUBSCRIBE = 5
"""
Client-to-server message subscribing to a topic.
"""
MESSAGE_TYPEID_UNSUBSCRIBE = 6
"""
Client-to-server message unsubscribing from a topic.
"""
MESSAGE_TYPEID_PUBLISH = 7
"""
Client-to-server message publishing an event to a topic.
"""
MESSAGE_TYPEID_EVENT = 8
"""
Server-to-client message providing the event of a (subscribed) topic.
"""
def connectionMade(self):
self.debugWamp = self.factory.debugWamp
self.debugApp = self.factory.debugApp
self.prefixes = PrefixMap()
self.calls = {}
self.procs = {}
def connectionLost(self, reason):
pass
def _protocolError(self, reason):
if self.debugWamp:
log.msg("Closing Wamp session on protocol violation : %s" % reason)
## FIXME: subprotocols are probably not supposed to close with CLOSE_STATUS_CODE_PROTOCOL_ERROR
##
self.protocolViolation("Wamp RPC/PubSub protocol violation ('%s')" % reason)
def shrink(self, uri, passthrough = False):
"""
Shrink given URI to CURIE according to current prefix mapping.
If no appropriate prefix mapping is available, return original URI.
:param uri: URI to shrink.
:type uri: str
:returns str -- CURIE or original URI.
"""
return self.prefixes.shrink(uri)
def resolve(self, curieOrUri, passthrough = False):
"""
Resolve given CURIE/URI according to current prefix mapping or return
None if cannot be resolved.
:param curieOrUri: CURIE or URI.
:type curieOrUri: str
:returns: str -- Full URI for CURIE or None.
"""
return self.prefixes.resolve(curieOrUri)
def resolveOrPass(self, curieOrUri):
"""
Resolve given CURIE/URI according to current prefix mapping or return
string verbatim if cannot be resolved.
:param curieOrUri: CURIE or URI.
:type curieOrUri: str
:returns: str -- Full URI for CURIE or original string.
"""
return self.prefixes.resolveOrPass(curieOrUri)
def serializeMessage(self, msg):
"""
Delegate message serialization to the factory.
:param msg: The message to be serialized.
:type msg: str
:return: The serialized message.
"""
return self.factory._serialize(msg)
def registerForRpc(self, obj, baseUri = "", methods = None):
"""
Register a service object for RPC. A service object has methods
which are decorated using @exportRpc.
:param obj: The object to be registered (in this WebSockets session) for RPC.
:type obj: Object with methods decorated using @exportRpc.
:param baseUri: Optional base URI which is prepended to method names for export.
:type baseUri: String.
:param methods: If not None, a list of unbound class methods corresponding to obj
which should be registered. This can be used to register only a subset
of the methods decorated with @exportRpc.
:type methods: List of unbound class methods.
"""
for k in inspect.getmembers(obj.__class__, inspect.ismethod):
if k[1].__dict__.has_key("_autobahn_rpc_id"):
if methods is None or k[1] in methods:
uri = baseUri + k[1].__dict__["_autobahn_rpc_id"]
proc = k[1]
self.registerMethodForRpc(uri, obj, proc)
def registerMethodForRpc(self, uri, obj, proc):
"""
Register a method of an object for RPC.
:param uri: URI to register RPC method under.
:type uri: str
:param obj: The object on which to register a method for RPC.
:type obj: object
:param proc: Unbound object method to register RPC for.
:type proc: unbound method
"""
self.procs[uri] = (obj, proc, False)
if self.debugWamp:
log.msg("registered remote method on %s" % uri)
def registerProcedureForRpc(self, uri, proc):
"""
Register a (free standing) function/procedure for RPC.
:param uri: URI to register RPC function/procedure under.
:type uri: str
:param proc: Free-standing function/procedure.
:type proc: callable
"""
self.procs[uri] = (None, proc, False)
if self.debugWamp:
log.msg("registered remote procedure on %s" % uri)
def registerHandlerMethodForRpc(self, uri, obj, handler, extra = None):
"""
Register a handler on an object for RPC.
:param uri: URI to register RPC method under.
:type uri: str
:param obj: The object on which to register the RPC handler
:type obj: object
:param handler: Unbound object method to register as the RPC handler.
:type handler: unbound method
:param extra: Optional extra data that will be given to the handler at call time.
:type extra: object
"""
self.procs[uri] = (obj, handler, True, extra)
if self.debugWamp:
log.msg("registered remote handler method on %s" % uri)
def registerHandlerProcedureForRpc(self, uri, handler, extra = None):
"""
Register a (free standing) handler for RPC.
:param uri: URI to register RPC handler under.
:type uri: str
:param handler: Free-standing handler.
:type handler: callable
:param extra: Optional extra data that will be given to the handler at call time.
:type extra: object
"""
self.procs[uri] = (None, handler, True, extra)
if self.debugWamp:
log.msg("registered remote handler procedure on %s" % uri)
def procForUri(self, uri):
"""
Returns the procedure specification for `uri` or None, if it does not exist.
:param uri: URI to be checked.
:type uri: str
:returns: The procedure specification for `uri`, if it exists,
`None` otherwise.
"""
return self.procs[uri] if uri in self.procs else None
def onBeforeCall(self, callid, uri, args, isRegistered):
"""
Callback fired before executing incoming RPC. This can be used for
logging, statistics tracking, redirecting RPCs, argument mangling and so on.
The default implementation just returns the incoming URI/args.
:param uri: RPC endpoint URI (fully-qualified).
:type uri: str
:param args: RPC arguments array.
:type args: list
:param isRegistered: True, iff RPC endpoint URI is registered in this session.
:type isRegistered: bool
:returns pair -- Must return URI/Args pair.
"""
return uri, args
def onAfterCallSuccess(self, result, call):
"""
Callback fired after executing incoming RPC with success, but before
sending the RPC success message.
The default implementation will just return `result` to the client.
:param result: Result returned for executing the incoming RPC.
:type result: Anything returned by the user code for the endpoint.
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
:returns obj -- Result sent back to client.
"""
return result
def onAfterCallError(self, error, call):
"""
Callback fired after executing incoming RPC with failure, but before
sending the RPC error message.
The default implementation will just return `error` to the client.
:param error: Error that occurred during incoming RPC call execution.
:type error: Instance of twisted.python.failure.Failure
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
:returns twisted.python.failure.Failure -- Error sent back to client.
"""
return error
def onAfterSendCallSuccess(self, msg, call):
"""
Callback fired after sending RPC success message.
:param msg: Serialized WAMP message.
:type msg: str
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
"""
pass
def onAfterSendCallError(self, msg, call):
"""
Callback fired after sending RPC error message.
:param msg: Serialized WAMP message.
:type msg: str
:param call: WAMP call object for incoming RPC.
:type call: instance of Call
"""
pass
def call(self, *args):
"""
Perform a remote-procedure call (RPC). The first argument is the procedure
URI (mandatory). Subsequent positional arguments can be provided (must be
JSON serializable). The return value is a Twisted Deferred.
"""
if len(args) < 1:
raise Exception("missing procedure URI")
if type(args[0]) not in [unicode, str]:
raise Exception("invalid type for procedure URI")
procuri = args[0]
while True:
callid = newid()
if not self.calls.has_key(callid):
break
d = Deferred()
self.calls[callid] = d
msg = [WampProtocol.MESSAGE_TYPEID_CALL, callid, procuri]
msg.extend(args[1:])
try:
o = self.factory._serialize(msg)
except:
raise Exception("call argument(s) not JSON serializable")
self.sendMessage(o)
return d
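   ## Editor's illustrative note (URI and handlers hypothetical): from client code,
   ## e.g. inside WampClientProtocol.onSessionOpen(), call() returns a Twisted
   ## Deferred that fires with the remote result or errbacks on a CALLERROR:
   ##
   ##   d = self.call("http://example.com/api#add", 2, 3)
   ##   d.addCallbacks(lambda res: log.msg("result: %s" % res),
   ##                  lambda err: log.msg("call failed: %s" % err))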
## use Ultrajson (https://github.com/esnme/ultrajson) if available
##
try:
import ujson
json_lib = ujson
json_loads = ujson.loads
json_dumps = lambda x: ujson.dumps(x, ensure_ascii = False)
except:
import json
json_lib = json
json_loads = json.loads
json_dumps = json.dumps
class WampFactory:
"""
WAMP factory base class. Mixin for WampServerFactory and WampClientFactory.
"""
def __init__(self):
if self.debugWamp:
log.msg("Using JSON processor '%s'" % json_lib.__name__)
def _serialize(self, obj):
"""
Default object serializer.
"""
return json_dumps(obj)
def _unserialize(self, bytes):
"""
Default object deserializer.
"""
return json_loads(bytes)
class WampServerProtocol(WebSocketServerProtocol, WampProtocol):
"""
Server protocol for WAMP RPC/PubSub.
"""
SUBSCRIBE = 1
PUBLISH = 2
def onSessionOpen(self):
"""
Callback fired when WAMP session was fully established.
"""
pass
def onOpen(self):
"""
Default implementation for WAMP connection opened sends
Welcome message containing session ID.
"""
self.session_id = newid()
## include traceback as error detail for RPC errors with
## no error URI - that is errors returned with URI_WAMP_ERROR_GENERIC
self.includeTraceback = False
msg = [WampProtocol.MESSAGE_TYPEID_WELCOME,
self.session_id,
WampProtocol.WAMP_PROTOCOL_VERSION,
"Autobahn/%s" % __version__]
o = self.factory._serialize(msg)
self.sendMessage(o)
self.factory._addSession(self, self.session_id)
self.onSessionOpen()
def onConnect(self, connectionRequest):
"""
Default implementation for WAMP connection acceptance:
check if client announced WAMP subprotocol, and only accept connection
if client did so.
"""
for p in connectionRequest.protocols:
if p in self.factory.protocols:
return (p, {}) # return (protocol, headers)
raise http.HttpException(http.BAD_REQUEST[0], "this server only speaks WAMP")
def connectionMade(self):
WebSocketServerProtocol.connectionMade(self)
WampProtocol.connectionMade(self)
## RPCs registered in this session (a URI map of (object, procedure)
## pairs for object methods or (None, procedure) for free standing procedures)
self.procs = {}
## Publication handlers registered in this session (a URI map of (object, pubHandler) pairs
## pairs for object methods (handlers) or (None, None) for topic without handler)
self.pubHandlers = {}
## Subscription handlers registered in this session (a URI map of (object, subHandler) pairs
## pairs for object methods (handlers) or (None, None) for topic without handler)
self.subHandlers = {}
self.handlerMapping = {
self.MESSAGE_TYPEID_CALL: CallHandler(self, self.prefixes),
self.MESSAGE_TYPEID_CALL_RESULT: CallResultHandler(self, self.prefixes),
self.MESSAGE_TYPEID_CALL_ERROR: CallErrorHandler(self, self.prefixes)}
def connectionLost(self, reason):
self.factory._unsubscribeClient(self)
self.factory._removeSession(self)
WampProtocol.connectionLost(self, reason)
WebSocketServerProtocol.connectionLost(self, reason)
def sendMessage(self,
payload,
binary = False,
payload_frag_size = None,
sync = False,
doNotCompress = False):
if self.debugWamp:
log.msg("TX WAMP: %s" % str(payload))
WebSocketServerProtocol.sendMessage(self,
payload,
binary,
payload_frag_size,
sync,
doNotCompress)
def _getPubHandler(self, topicUri):
## Longest matching prefix based resolution of (full) topic URI to
## publication handler.
## Returns a 5-tuple (consumedUriPart, unconsumedUriPart, handlerObj, handlerProc, prefixMatch)
##
for i in xrange(len(topicUri), -1, -1):
tt = topicUri[:i]
if self.pubHandlers.has_key(tt):
h = self.pubHandlers[tt]
return (tt, topicUri[i:], h[0], h[1], h[2])
return None
def _getSubHandler(self, topicUri):
## Longest matching prefix based resolution of (full) topic URI to
## subscription handler.
## Returns a 5-tuple (consumedUriPart, unconsumedUriPart, handlerObj, handlerProc, prefixMatch)
##
for i in xrange(len(topicUri), -1, -1):
tt = topicUri[:i]
if self.subHandlers.has_key(tt):
h = self.subHandlers[tt]
return (tt, topicUri[i:], h[0], h[1], h[2])
return None
def registerForPubSub(self, topicUri, prefixMatch = False, pubsub = PUBLISH | SUBSCRIBE):
"""
Register a topic URI as publish/subscribe channel in this session.
:param topicUri: Topic URI to be established as publish/subscribe channel.
:type topicUri: str
:param prefixMatch: Allow to match this topic URI by prefix.
:type prefixMatch: bool
:param pubsub: Allow publication and/or subscription.
:type pubsub: WampServerProtocol.PUBLISH, WampServerProtocol.SUBSCRIBE or WampServerProtocol.PUBLISH | WampServerProtocol.SUBSCRIBE
"""
if pubsub & WampServerProtocol.PUBLISH:
self.pubHandlers[topicUri] = (None, None, prefixMatch)
if self.debugWamp:
log.msg("registered topic %s for publication (match by prefix = %s)" % (topicUri, prefixMatch))
if pubsub & WampServerProtocol.SUBSCRIBE:
self.subHandlers[topicUri] = (None, None, prefixMatch)
if self.debugWamp:
log.msg("registered topic %s for subscription (match by prefix = %s)" % (topicUri, prefixMatch))
def registerHandlerForPubSub(self, obj, baseUri = ""):
"""
Register a handler object for PubSub. A handler object has methods
which are decorated using @exportPub and @exportSub.
:param obj: The object to be registered (in this WebSockets session) for PubSub.
:type obj: Object with methods decorated using @exportPub and @exportSub.
:param baseUri: Optional base URI which is prepended to topic names for export.
:type baseUri: String.
"""
for k in inspect.getmembers(obj.__class__, inspect.ismethod):
if k[1].__dict__.has_key("_autobahn_pub_id"):
uri = baseUri + k[1].__dict__["_autobahn_pub_id"]
prefixMatch = k[1].__dict__["_autobahn_pub_prefix_match"]
proc = k[1]
self.registerHandlerForPub(uri, obj, proc, prefixMatch)
elif k[1].__dict__.has_key("_autobahn_sub_id"):
uri = baseUri + k[1].__dict__["_autobahn_sub_id"]
prefixMatch = k[1].__dict__["_autobahn_sub_prefix_match"]
proc = k[1]
self.registerHandlerForSub(uri, obj, proc, prefixMatch)
def registerHandlerForSub(self, uri, obj, proc, prefixMatch = False):
"""
Register a method of an object as subscription handler.
:param uri: Topic URI to register subscription handler for.
:type uri: str
:param obj: The object on which to register a method as subscription handler.
:type obj: object
:param proc: Unbound object method to register as subscription handler.
:type proc: unbound method
:param prefixMatch: Allow to match this topic URI by prefix.
:type prefixMatch: bool
"""
self.subHandlers[uri] = (obj, proc, prefixMatch)
if not self.pubHandlers.has_key(uri):
self.pubHandlers[uri] = (None, None, False)
if self.debugWamp:
log.msg("registered subscription handler for topic %s" % uri)
def registerHandlerForPub(self, uri, obj, proc, prefixMatch = False):
"""
Register a method of an object as publication handler.
:param uri: Topic URI to register publication handler for.
:type uri: str
:param obj: The object on which to register a method as publication handler.
:type obj: object
:param proc: Unbound object method to register as publication handler.
:type proc: unbound method
:param prefixMatch: Allow to match this topic URI by prefix.
:type prefixMatch: bool
"""
self.pubHandlers[uri] = (obj, proc, prefixMatch)
if not self.subHandlers.has_key(uri):
self.subHandlers[uri] = (None, None, False)
if self.debugWamp:
log.msg("registered publication handler for topic %s" % uri)
def dispatch(self, topicUri, event, exclude = [], eligible = None):
"""
Dispatch an event for a topic to all clients subscribed to
and authorized for that topic.
Optionally, exclude list of clients and/or only consider clients
from explicit eligibles. In other words, the event is delivered
to the set
(subscribers - excluded) & eligible
:param topicUri: URI of topic to publish event to.
:type topicUri: str
:param event: Event to dispatch.
:type event: obj
:param exclude: Optional list of clients (WampServerProtocol instances) to exclude.
:type exclude: list of obj
:param eligible: Optional list of clients (WampServerProtocol instances) eligible at all (or None for all).
:type eligible: list of obj
:returns twisted.internet.defer.Deferred -- Will be fired when event was
dispatched to all subscribers. The return value provided to the deferred
is a pair (delivered, requested), where delivered = number of actual
receivers, and requested = number of (subscribers - excluded) & eligible.
"""
return self.factory.dispatch(topicUri, event, exclude, eligible)
def onMessage(self, msg, binary):
"""
Handle WAMP messages received from WAMP client.
"""
if self.debugWamp:
log.msg("RX WAMP: %s" % str(msg))
if not binary:
try:
obj = self.factory._unserialize(msg)
if type(obj) == list:
msgtype = obj[0]
### XXX Replace check by try...except when all handlers
### XXX are in place. Exception handling should create
### XXX a protocolError message about unsupported
### XXX message type
if msgtype in [WampProtocol.MESSAGE_TYPEID_CALL,
WampProtocol.MESSAGE_TYPEID_CALL_RESULT,
WampProtocol.MESSAGE_TYPEID_CALL_ERROR]:
self.handlerMapping[msgtype].handleMessage(obj)
### XXX Move remaining code to appropriate handlers
## Subscribe Message
##
elif msgtype == WampProtocol.MESSAGE_TYPEID_SUBSCRIBE:
topicUri = self.prefixes.resolveOrPass(obj[1]) ### PFX - remove
h = self._getSubHandler(topicUri)
if h:
## either exact match or prefix match allowed
if h[1] == "" or h[4]:
## direct topic
if h[2] is None and h[3] is None:
self.factory._subscribeClient(self, topicUri)
## topic handled by subscription handler
else:
## handler is object method
if h[2]:
a = maybeDeferred(h[3], h[2], str(h[0]), str(h[1]))
## handler is free standing procedure
else:
a = maybeDeferred(h[3], str(h[0]), str(h[1]))
def fail(failure):
if self.debugWamp:
log.msg("exception during custom subscription handler: %s" % failure)
def done(result):
## only subscribe client if handler did return True
if result:
self.factory._subscribeClient(self, topicUri)
a.addCallback(done).addErrback(fail)
else:
if self.debugWamp:
log.msg("topic %s matches only by prefix and prefix match disallowed" % topicUri)
else:
if self.debugWamp:
log.msg("no topic / subscription handler registered for %s" % topicUri)
## Unsubscribe Message
##
elif msgtype == WampProtocol.MESSAGE_TYPEID_UNSUBSCRIBE:
topicUri = self.prefixes.resolveOrPass(obj[1]) ### PFX - remove
self.factory._unsubscribeClient(self, topicUri)
## Publish Message
##
elif msgtype == WampProtocol.MESSAGE_TYPEID_PUBLISH:
topicUri = self.prefixes.resolveOrPass(obj[1]) ### PFX - remove
h = self._getPubHandler(topicUri)
if h:
## either exact match or prefix match allowed
if h[1] == "" or h[4]:
## Event
##
event = obj[2]
## Exclude Sessions List
##
exclude = [self] # exclude publisher by default
if len(obj) >= 4:
if type(obj[3]) == bool:
if not obj[3]:
exclude = []
elif type(obj[3]) == list:
## map session IDs to protos
exclude = self.factory.sessionIdsToProtos(obj[3])
else:
## FIXME: invalid type
pass
## Eligible Sessions List
##
eligible = None # all sessions are eligible by default
if len(obj) >= 5:
if type(obj[4]) == list:
## map session IDs to protos
eligible = self.factory.sessionIdsToProtos(obj[4])
else:
## FIXME: invalid type
pass
## direct topic
if h[2] is None and h[3] is None:
self.factory.dispatch(topicUri, event, exclude, eligible)
## topic handled by publication handler
else:
## handler is object method
if h[2]:
e = maybeDeferred(h[3], h[2], str(h[0]), str(h[1]), event)
## handler is free standing procedure
else:
e = maybeDeferred(h[3], str(h[0]), str(h[1]), event)
def fail(failure):
if self.debugWamp:
log.msg("exception during custom publication handler: %s" % failure)
def done(result):
## only dispatch event if handler did return event
if result:
self.factory.dispatch(topicUri, result, exclude, eligible)
e.addCallback(done).addErrback(fail)
else:
if self.debugWamp:
log.msg("topic %s matches only by prefix and prefix match disallowed" % topicUri)
else:
if self.debugWamp:
log.msg("no topic / publication handler registered for %s" % topicUri)
## Define prefix to be used in CURIEs
##
elif msgtype == WampProtocol.MESSAGE_TYPEID_PREFIX:
prefix = obj[1]
uri = obj[2]
self.prefixes.set(prefix, uri) ### PFX - remove whole block (this msg type won't survive)
else:
log.msg("unknown message type")
else:
log.msg("msg not a list")
except Exception as e:
traceback.print_exc()
else:
log.msg("binary message")
class WampServerFactory(WebSocketServerFactory, WampFactory):
"""
Server factory for Wamp RPC/PubSub.
"""
protocol = WampServerProtocol
"""
Twisted protocol used by default for WAMP servers.
"""
def __init__(self,
url,
debug = False,
debugCodePaths = False,
debugWamp = False,
debugApp = False,
externalPort = None,
reactor = None):
self.debugWamp = debugWamp
self.debugApp = debugApp
WebSocketServerFactory.__init__(self,
url,
protocols = ["wamp"],
debug = debug,
debugCodePaths = debugCodePaths,
externalPort = externalPort,
reactor = reactor)
WampFactory.__init__(self)
def onClientSubscribed(self, proto, topicUri):
"""
Callback fired when peer was (successfully) subscribed on some topic.
:param proto: Peer protocol instance subscribed.
:type proto: Instance of WampServerProtocol.
:param topicUri: Fully qualified, resolved URI of topic subscribed.
:type topicUri: str
"""
pass
def _subscribeClient(self, proto, topicUri):
"""
Called from proto to subscribe client for topic.
"""
if not self.subscriptions.has_key(topicUri):
self.subscriptions[topicUri] = set()
if self.debugWamp:
log.msg("subscriptions map created for topic %s" % topicUri)
if not proto in self.subscriptions[topicUri]:
self.subscriptions[topicUri].add(proto)
if self.debugWamp:
log.msg("subscribed peer %s on topic %s" % (proto.peer, topicUri))
self.onClientSubscribed(proto, topicUri)
else:
if self.debugWamp:
log.msg("peer %s already subscribed on topic %s" % (proto.peer, topicUri))
def onClientUnsubscribed(self, proto, topicUri):
"""
Callback fired when peer was (successfully) unsubscribed from some topic.
:param proto: Peer protocol instance unsubscribed.
:type proto: Instance of WampServerProtocol.
:param topicUri: Fully qualified, resolved URI of topic unsubscribed.
:type topicUri: str
"""
pass
def _unsubscribeClient(self, proto, topicUri = None):
"""
Called from proto to unsubscribe client from topic.
"""
if topicUri:
if self.subscriptions.has_key(topicUri) and proto in self.subscriptions[topicUri]:
self.subscriptions[topicUri].discard(proto)
if self.debugWamp:
log.msg("unsubscribed peer %s from topic %s" % (proto.peer, topicUri))
if len(self.subscriptions[topicUri]) == 0:
del self.subscriptions[topicUri]
if self.debugWamp:
log.msg("topic %s removed from subscriptions map - no one subscribed anymore" % topicUri)
self.onClientUnsubscribed(proto, topicUri)
else:
if self.debugWamp:
log.msg("peer %s not subscribed on topic %s" % (proto.peer, topicUri))
else:
for topicUri, subscribers in self.subscriptions.items():
if proto in subscribers:
subscribers.discard(proto)
if self.debugWamp:
log.msg("unsubscribed peer %s from topic %s" % (proto.peer, topicUri))
if len(subscribers) == 0:
del self.subscriptions[topicUri]
if self.debugWamp:
log.msg("topic %s removed from subscriptions map - no one subscribed anymore" % topicUri)
self.onClientUnsubscribed(proto, topicUri)
if self.debugWamp:
log.msg("unsubscribed peer %s from all topics" % (proto.peer))
def dispatch(self, topicUri, event, exclude = [], eligible = None):
"""
Dispatch an event to all peers subscribed to the event topic.
:param topicUri: Topic to publish event to.
:type topicUri: str
:param event: Event to publish (must be JSON serializable).
:type event: obj
:param exclude: List of WampServerProtocol instances to exclude from receivers.
:type exclude: List of obj
:param eligible: List of WampServerProtocol instances eligible as receivers (or None for all).
:type eligible: List of obj
:returns twisted.internet.defer.Deferred -- Will be fired when event was
dispatched to all subscribers. The return value provided to the deferred
is a pair (delivered, requested), where delivered = number of actual
receivers, and requested = number of (subscribers - excluded) & eligible.
"""
if self.debugWamp:
log.msg("publish event %s for topicUri %s" % (str(event), topicUri))
d = Deferred()
if self.subscriptions.has_key(topicUri) and len(self.subscriptions[topicUri]) > 0:
## FIXME: this might break ordering of event delivery from a
## receiver perspective. We might need to have send queues
## per receiver OR do recvs = deque(sorted(..))
## However, see http://twistedmatrix.com/trac/ticket/1396
if eligible is not None:
subscrbs = set(eligible) & self.subscriptions[topicUri]
else:
subscrbs = self.subscriptions[topicUri]
if len(exclude) > 0:
recvs = subscrbs - set(exclude)
else:
recvs = subscrbs
l = len(recvs)
if l > 0:
## ok, at least 1 subscriber not excluded and eligible
## => prepare message for mass sending
##
o = [WampProtocol.MESSAGE_TYPEID_EVENT, topicUri, event]
try:
msg = self._serialize(o)
if self.debugWamp:
log.msg("serialized event msg: " + str(msg))
except Exception as e:
raise Exception("invalid type for event - serialization failed [%s]" % e)
preparedMsg = self.prepareMessage(msg)
## chunked sending of prepared message
##
self._sendEvents(preparedMsg, recvs.copy(), 0, l, d)
else:
## receivers list empty after considering exclude and eligible sessions
##
d.callback((0, 0))
else:
## no one subscribed on topic
##
d.callback((0, 0))
return d
def _sendEvents(self, preparedMsg, recvs, delivered, requested, d):
"""
Delivers events to receivers in chunks and reenters the reactor
in-between, so that other stuff can run.
"""
## deliver a batch of events
done = False
for i in xrange(0, 256):
try:
proto = recvs.pop()
if proto.state == WebSocketProtocol.STATE_OPEN:
try:
proto.sendPreparedMessage(preparedMsg)
except:
pass
else:
if self.debugWamp:
log.msg("delivered event to peer %s" % proto.peer)
delivered += 1
except KeyError:
# all receivers done
done = True
break
if not done:
## if there are receivers left, redo
self.reactor.callLater(0, self._sendEvents, preparedMsg, recvs, delivered, requested, d)
else:
## else fire final result
d.callback((delivered, requested))
def _addSession(self, proto, session_id):
"""
Add proto for session ID.
"""
if not self.protoToSessions.has_key(proto):
self.protoToSessions[proto] = session_id
else:
raise Exception("logic error - dublicate _addSession for protoToSessions")
if not self.sessionsToProto.has_key(session_id):
self.sessionsToProto[session_id] = proto
else:
raise Exception("logic error - dublicate _addSession for sessionsToProto")
def _removeSession(self, proto):
"""
Remove session by proto.
"""
if self.protoToSessions.has_key(proto):
session_id = self.protoToSessions[proto]
del self.protoToSessions[proto]
if self.sessionsToProto.has_key(session_id):
del self.sessionsToProto[session_id]
def sessionIdToProto(self, sessionId):
"""
Map WAMP session ID to connected protocol instance (object of type WampServerProtocol).
:param sessionId: WAMP session ID to be mapped.
:type sessionId: str
:returns obj -- WampServerProtocol instance or None.
"""
return self.sessionsToProto.get(sessionId, None)
def sessionIdsToProtos(self, sessionIds):
"""
Map WAMP session IDs to connected protocol instances (objects of type WampServerProtocol).
:param sessionIds: List of session IDs to be mapped.
:type sessionIds: list of str
:returns list -- List of WampServerProtocol instances corresponding to the WAMP session IDs.
"""
protos = []
for s in sessionIds:
if self.sessionsToProto.has_key(s):
protos.append(self.sessionsToProto[s])
return protos
def protoToSessionId(self, proto):
"""
Map connected protocol instance (object of type WampServerProtocol) to WAMP session ID.
:param proto: Instance of WampServerProtocol to be mapped.
:type proto: obj of WampServerProtocol
:returns str -- WAMP session ID or None.
"""
return self.protoToSessions.get(proto, None)
def protosToSessionIds(self, protos):
"""
Map connected protocol instances (objects of type WampServerProtocol) to WAMP session IDs.
:param protos: List of instances of WampServerProtocol to be mapped.
:type protos: list of WampServerProtocol
:returns list -- List of WAMP session IDs corresponding to the protos.
"""
sessionIds = []
for p in protos:
if self.protoToSessions.has_key(p):
sessionIds.append(self.protoToSessions[p])
return sessionIds
def startFactory(self):
"""
Called by Twisted when the factory starts up. When overriding, make
sure to call the base method.
"""
if self.debugWamp:
log.msg("WampServerFactory starting")
self.subscriptions = {}
self.protoToSessions = {}
self.sessionsToProto = {}
def stopFactory(self):
"""
Called by Twisted when the factory shuts down. When overriding, make
sure to call the base method.
"""
if self.debugWamp:
log.msg("WampServerFactory stopped")
class WampClientProtocol(WebSocketClientProtocol, WampProtocol):
"""
Twisted client protocol for WAMP.
"""
def onSessionOpen(self):
"""
Callback fired when WAMP session was fully established. Override
in derived class.
"""
pass
def onOpen(self):
## do nothing here .. onSessionOpen is only fired when welcome
## message was received (and thus session ID set)
pass
def onConnect(self, connectionResponse):
if connectionResponse.protocol not in self.factory.protocols:
raise Exception("server does not speak WAMP")
def connectionMade(self):
WebSocketClientProtocol.connectionMade(self)
WampProtocol.connectionMade(self)
self.subscriptions = {}
self.handlerMapping = {
self.MESSAGE_TYPEID_CALL: CallHandler(self, self.prefixes),
self.MESSAGE_TYPEID_CALL_RESULT: CallResultHandler(self, self.prefixes),
self.MESSAGE_TYPEID_CALL_ERROR: CallErrorHandler(self, self.prefixes)}
def connectionLost(self, reason):
WampProtocol.connectionLost(self, reason)
WebSocketClientProtocol.connectionLost(self, reason)
def sendMessage(self, payload):
if self.debugWamp:
log.msg("TX WAMP: %s" % str(payload))
WebSocketClientProtocol.sendMessage(self, payload)
def onMessage(self, msg, binary):
"""Internal method to handle WAMP messages received from WAMP server."""
## WAMP is text message only
##
if binary:
self._protocolError("binary WebSocket message received")
return
if self.debugWamp:
log.msg("RX WAMP: %s" % str(msg))
## WAMP is proper JSON payload
##
try:
obj = self.factory._unserialize(msg)
except Exception as e:
self._protocolError("WAMP message payload could not be unserialized [%s]" % e)
return
## Every WAMP message is a list
##
if type(obj) != list:
self._protocolError("WAMP message payload not a list")
return
## Every WAMP message starts with an integer for message type
##
if len(obj) < 1:
self._protocolError("WAMP message without message type")
return
if type(obj[0]) != int:
self._protocolError("WAMP message type not an integer")
return
## WAMP message type
##
msgtype = obj[0]
## Valid WAMP message types received by WAMP clients
##
if msgtype not in [WampProtocol.MESSAGE_TYPEID_WELCOME,
WampProtocol.MESSAGE_TYPEID_CALL,
WampProtocol.MESSAGE_TYPEID_CALL_RESULT,
WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
WampProtocol.MESSAGE_TYPEID_EVENT]:
self._protocolError("invalid WAMP message type %d" % msgtype)
return
if msgtype in [WampProtocol.MESSAGE_TYPEID_CALL,
WampProtocol.MESSAGE_TYPEID_CALL_RESULT,
WampProtocol.MESSAGE_TYPEID_CALL_ERROR]:
self.handlerMapping[msgtype].handleMessage(obj)
## WAMP EVENT
##
elif msgtype == WampProtocol.MESSAGE_TYPEID_EVENT:
## Topic
##
if len(obj) != 3:
self._protocolError("WAMP EVENT message invalid length %d" % len(obj))
return
if type(obj[1]) not in [unicode, str]:
self._protocolError("invalid type for <topic> in WAMP EVENT message")
return
unresolvedTopicUri = str(obj[1])
topicUri = self.prefixes.resolveOrPass(unresolvedTopicUri) ### PFX - remove
## Fire PubSub Handler
##
if self.subscriptions.has_key(topicUri):
event = obj[2]
self.subscriptions[topicUri](topicUri, event)
else:
## event received for non-subscribed topic (could be because we
## just unsubscribed, and server already sent out event for
## previous subscription)
pass
## WAMP WELCOME
##
elif msgtype == WampProtocol.MESSAGE_TYPEID_WELCOME:
## Session ID
##
if len(obj) < 2:
self._protocolError("WAMP WELCOME message invalid length %d" % len(obj))
return
if type(obj[1]) not in [unicode, str]:
self._protocolError("invalid type for <sessionid> in WAMP WELCOME message")
return
self.session_id = str(obj[1])
## WAMP Protocol Version
##
if len(obj) > 2:
if type(obj[2]) not in [int]:
self._protocolError("invalid type for <version> in WAMP WELCOME message")
return
else:
self.session_protocol_version = obj[2]
else:
self.session_protocol_version = None
## Server Ident
##
if len(obj) > 3:
if type(obj[3]) not in [unicode, str]:
self._protocolError("invalid type for <server> in WAMP WELCOME message")
return
else:
self.session_server = obj[3]
else:
self.session_server = None
self.onSessionOpen()
else:
raise Exception("logic error")
def prefix(self, prefix, uri):
"""
Establishes a prefix to be used in `CURIEs <http://en.wikipedia.org/wiki/CURIE>`_
instead of URIs having that prefix for both client-to-server and
server-to-client messages.
:param prefix: Prefix to be used in CURIEs.
:type prefix: str
:param uri: URI that this prefix will resolve to.
:type uri: str
"""
if type(prefix) != str:
raise Exception("invalid type for prefix")
if type(uri) not in [unicode, str]:
raise Exception("invalid type for URI")
if self.prefixes.get(prefix): ### PFX - keep
raise Exception("prefix already defined")
self.prefixes.set(prefix, uri) ### PFX - keep
msg = [WampProtocol.MESSAGE_TYPEID_PREFIX, prefix, uri]
self.sendMessage(self.factory._serialize(msg))
def publish(self, topicUri, event, excludeMe = None, exclude = None, eligible = None):
"""
Publish an event under a topic URI. The latter may be abbreviated using a
CURIE which has been previously defined using prefix(). The event must
be JSON serializable.
:param topicUri: The topic URI or CURIE.
:type topicUri: str
:param event: Event to be published (must be JSON serializable) or None.
:type event: value
:param excludeMe: When True, don't deliver the published event to myself (when I'm subscribed).
:type excludeMe: bool
:param exclude: Optional list of session IDs to exclude from receivers.
:type exclude: list of str
:param eligible: Optional list of session IDs that are eligible as receivers.
:type eligible: list of str
"""
if type(topicUri) not in [unicode, str]:
raise Exception("invalid type for parameter 'topicUri' - must be string (was %s)" % type(topicUri))
if excludeMe is not None:
if type(excludeMe) != bool:
raise Exception("invalid type for parameter 'excludeMe' - must be bool (was %s)" % type(excludeMe))
if exclude is not None:
if type(exclude) != list:
raise Exception("invalid type for parameter 'exclude' - must be list (was %s)" % type(exclude))
if eligible is not None:
if type(eligible) != list:
raise Exception("invalid type for parameter 'eligible' - must be list (was %s)" % type(eligible))
if exclude is not None or eligible is not None:
if exclude is None:
if excludeMe is not None:
if excludeMe:
exclude = [self.session_id]
else:
exclude = []
else:
exclude = [self.session_id]
if eligible is not None:
msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event, exclude, eligible]
else:
msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event, exclude]
else:
if excludeMe:
msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event]
else:
msg = [WampProtocol.MESSAGE_TYPEID_PUBLISH, topicUri, event, excludeMe]
try:
o = self.factory._serialize(msg)
except:
raise Exception("invalid type for parameter 'event' - not JSON serializable")
self.sendMessage(o)
def subscribe(self, topicUri, handler):
"""
Subscribe to topic. When already subscribed, will overwrite the handler.
:param topicUri: URI or CURIE of topic to subscribe to.
:type topicUri: str
:param handler: Event handler to be invoked upon receiving events for topic.
:type handler: Python callable, will be called as in <callable>(eventUri, event).
"""
if type(topicUri) not in [unicode, str]:
raise Exception("invalid type for parameter 'topicUri' - must be string (was %s)" % type(topicUri))
if not hasattr(handler, '__call__'):
raise Exception("invalid type for parameter 'handler' - must be a callable (was %s)" % type(handler))
turi = self.prefixes.resolveOrPass(topicUri) ### PFX - keep
if not self.subscriptions.has_key(turi):
msg = [WampProtocol.MESSAGE_TYPEID_SUBSCRIBE, topicUri]
o = self.factory._serialize(msg)
self.sendMessage(o)
self.subscriptions[turi] = handler
def unsubscribe(self, topicUri):
"""
Unsubscribe from topic. Will do nothing when currently not subscribed to the topic.
:param topicUri: URI or CURIE of topic to unsubscribe from.
:type topicUri: str
"""
if type(topicUri) not in [unicode, str]:
raise Exception("invalid type for parameter 'topicUri' - must be string (was %s)" % type(topicUri))
turi = self.prefixes.resolveOrPass(topicUri) ### PFX - keep
if self.subscriptions.has_key(turi):
msg = [WampProtocol.MESSAGE_TYPEID_UNSUBSCRIBE, topicUri]
o = self.factory._serialize(msg)
self.sendMessage(o)
del self.subscriptions[turi]
class WampClientFactory(WebSocketClientFactory, WampFactory):
"""
Twisted client factory for WAMP.
"""
protocol = WampClientProtocol
def __init__(self,
url,
debug = False,
debugCodePaths = False,
debugWamp = False,
debugApp = False,
reactor = None):
self.debugWamp = debugWamp
self.debugApp = debugApp
WebSocketClientFactory.__init__(self,
url,
protocols = ["wamp"],
debug = debug,
debugCodePaths = debugCodePaths,
reactor = reactor)
WampFactory.__init__(self)
def startFactory(self):
"""
Called by Twisted when the factory starts up. When overriding, make
sure to call the base method.
"""
if self.debugWamp:
log.msg("WebSocketClientFactory starting")
def stopFactory(self):
"""
Called by Twisted when the factory shuts down. When overriding, make
sure to call the base method.
"""
if self.debugWamp:
log.msg("WebSocketClientFactory stopped")
class WampCraProtocol(WampProtocol):
"""
Base class for WAMP Challenge-Response Authentication protocols (client and server).
WAMP-CRA is a cryptographically strong challenge response authentication
protocol based on HMAC-SHA256.
The protocol performs in-band authentication of WAMP clients to WAMP servers.
WAMP-CRA does not introduce any new WAMP protocol level message types, but
implements the authentication handshake via standard WAMP RPCs with well-known
procedure URIs and signatures.
"""
def deriveKey(secret, extra = None):
"""
Computes a derived cryptographic key from a password according to PBKDF2
http://en.wikipedia.org/wiki/PBKDF2.
The function will only return a derived key if at least 'salt' is
present in the 'extra' dictionary. The complete set of attributes
that can be set in 'extra':
salt: The salt value to be used.
iterations: Number of iterations of derivation algorithm to run.
keylen: Key length to derive.
:returns str -- The derived key or the original secret.
"""
if type(extra) == dict and extra.has_key('salt'):
salt = str(extra['salt'])
iterations = int(extra.get('iterations', 10000))
keylen = int(extra.get('keylen', 32))
b = pbkdf2_bin(secret, salt, iterations, keylen, hashlib.sha256)
return binascii.b2a_base64(b).strip()
else:
return secret
deriveKey = staticmethod(deriveKey)
def authSignature(self, authChallenge, authSecret = None, authExtra = None):
"""
Compute the authentication signature from an authentication challenge and a secret.
:param authChallenge: The authentication challenge.
:type authChallenge: str
:param authSecret: The authentication secret.
:type authSecret: str
:param authExtra: Extra authentication information for salting the secret
(salt, keylen, iterations).
:type authExtra: dict
:returns str -- The authentication signature.
"""
if authSecret is None:
authSecret = ""
if isinstance(authSecret, unicode):
authSecret = authSecret.encode('utf8')
authSecret = WampCraProtocol.deriveKey(authSecret, authExtra)
h = hmac.new(authSecret, authChallenge, hashlib.sha256)
sig = binascii.b2a_base64(h.digest()).strip()
return sig
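## Editor's illustrative sketch (not part of the original source): how key derivation
## and challenge signing defined above fit together under WAMP-CRA. The secret, salt
## and challenge values are hypothetical; the function is defined here but never called.
def _example_wampcra_signature():
   extra = {'salt': "some-random-salt", 'iterations': 1000, 'keylen': 32}
   ## both ends derive the same key from the shared secret and the salt ...
   derived = WampCraProtocol.deriveKey("s3cr3t", extra)
   ## ... and the challenge issued by the server is signed with HMAC-SHA256
   challenge = '{"authid": "...", "authkey": "joe"}'
   return WampCraProtocol().authSignature(challenge, derived)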
class WampCraClientProtocol(WampClientProtocol, WampCraProtocol):
"""
Simple, authenticated WAMP client protocol.
The client can perform WAMP-Challenge-Response-Authentication ("WAMP-CRA") to authenticate
itself to a WAMP server. The server must also implement WAMP-CRA.
"""
def authenticate(self, authKey = None, authExtra = None, authSecret = None):
"""
Authenticate the WAMP session to server.
:param authKey: The key of the authentication credentials, something like a user or application name.
:type authKey: str
:param authExtra: Any extra authentication information.
:type authExtra: dict
:param authSecret: The secret of the authentication credentials, something like the user password or application secret key.
:type authSecret: str
:returns Deferred -- Deferred that fires upon authentication success (with permissions) or failure.
"""
def _onAuthChallenge(challenge):
if authKey is not None:
challengeObj = self.factory._unserialize(challenge)
if 'authextra' in challengeObj:
authExtra = challengeObj['authextra']
sig = self.authSignature(challenge, authSecret, authExtra)
else:
sig = self.authSignature(challenge, authSecret)
else:
sig = None
d = self.call(WampProtocol.URI_WAMP_PROCEDURE + "auth", sig)
return d
d = self.call(WampProtocol.URI_WAMP_PROCEDURE + "authreq", authKey, authExtra)
d.addCallback(_onAuthChallenge)
return d
class WampCraServerProtocol(WampServerProtocol, WampCraProtocol):
"""
Simple, authenticating WAMP server protocol.
The server lets clients perform WAMP-Challenge-Response-Authentication ("WAMP-CRA")
to authenticate. The clients must also implement WAMP-CRA.
To implement an authenticating server, override:
* getAuthSecret
* getAuthPermissions
* onAuthenticated
in your class deriving from this class.
"""
clientAuthTimeout = 0
"""
Client authentication timeout in seconds or 0 for infinite. A client
must perform authentication after the initial WebSocket handshake within
this timeout or the connection is failed.
"""
clientAuthAllowAnonymous = True
"""
Allow anonymous client authentication. When this is set to True, a client
may "authenticate" as anonymous.
"""
def getAuthPermissions(self, authKey, authExtra):
"""
Get the permissions the session is granted when the authentication succeeds
for the given key / extra information.
Override in derived class to implement your authentication.
A permissions object is structured like this::
{'permissions': {'rpc': [
{'uri': / RPC Endpoint URI - String /,
'call': / Allow to call? - Boolean /}
],
'pubsub': [
{'uri': / PubSub Topic URI / URI prefix - String /,
'prefix': / URI matched by prefix? - Boolean /,
'pub': / Allow to publish? - Boolean /,
'sub': / Allow to subscribe? - Boolean /}
]
}
}
You can add custom information to this object. The object will be provided again
when the client authentication succeeded in :meth:`onAuthenticated`.
:param authKey: The authentication key.
:type authKey: str
:param authExtra: Authentication extra information.
:type authExtra: dict
:returns obj or Deferred -- Return a permissions object or None when no permissions granted.
"""
return None
def getAuthSecret(self, authKey):
"""
Get the authentication secret for an authentication key, i.e. the
user password for the user name. Return None when the authentication
key does not exist.
Override in derived class to implement your authentication.
:param authKey: The authentication key.
:type authKey: str
:returns str or Deferred -- The authentication secret for the key or None when the key does not exist.
"""
return None
def onAuthTimeout(self):
"""
Fired when the client does not authenticate itself in time. The default implementation
will simply fail the connection.
May be overridden in derived class.
"""
if not self._clientAuthenticated:
log.msg("failing connection upon client authentication timeout [%s secs]" % self.clientAuthTimeout)
self.failConnection()
def onAuthenticated(self, authKey, permissions):
"""
Fired when client authentication was successful.
Override in derived class and register PubSub topics and/or RPC endpoints.
:param authKey: The authentication key the session was authenticated for.
:type authKey: str
:param permissions: The permissions object returned from :meth:`getAuthPermissions`.
:type permissions: obj
"""
pass
def registerForPubSubFromPermissions(self, permissions):
"""
Register topics for PubSub from auth permissions.
:param permissions: The permissions granted to the now authenticated client.
:type permissions: list
"""
for p in permissions['pubsub']:
## register topics for the clients
##
pubsub = (WampServerProtocol.PUBLISH if p['pub'] else 0) | \
(WampServerProtocol.SUBSCRIBE if p['sub'] else 0)
topic = p['uri']
            if topic in self.pubHandlers or topic in self.subHandlers:
## FIXME: handle dups!
log.msg("DUPLICATE TOPIC PERMISSION !!! " + topic)
self.registerForPubSub(topic, p['prefix'], pubsub)
def onSessionOpen(self):
"""
Called when WAMP session has been established, but not yet authenticated. The default
implementation will prepare the session allowing the client to authenticate itself.
"""
## register RPC endpoints for WAMP-CRA authentication
##
self.registerForRpc(self, WampProtocol.URI_WAMP_PROCEDURE, [WampCraServerProtocol.authRequest,
WampCraServerProtocol.auth])
## reset authentication state
##
self._clientAuthenticated = False
self._clientPendingAuth = None
self._clientAuthTimeoutCall = None
## client authentication timeout
##
if self.clientAuthTimeout > 0:
self._clientAuthTimeoutCall = self.factory.reactor.callLater(self.clientAuthTimeout, self.onAuthTimeout)
@exportRpc("authreq")
def authRequest(self, authKey = None, extra = None):
"""
RPC endpoint for clients to initiate the authentication handshake.
:param authKey: Authentication key, such as user name or application name.
:type authKey: str
:param extra: Authentication extra information.
:type extra: dict
:returns str -- Authentication challenge. The client will need to create an authentication signature from this.
"""
## check authentication state
##
if self._clientAuthenticated:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "already-authenticated"), "already authenticated")
if self._clientPendingAuth is not None:
            raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "authentication-already-requested"), "authentication request already issued - authentication pending")
## check extra
##
if extra:
if type(extra) != dict:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "extra not a dictionary (was %s)." % str(type(extra)))
else:
extra = {}
#for k in extra:
# if type(extra[k]) not in [str, unicode, int, long, float, bool, types.NoneType]:
# raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "attribute '%s' in extra not a primitive type (was %s)" % (k, str(type(extra[k]))))
## check authKey
##
if authKey is None and not self.clientAuthAllowAnonymous:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "anonymous-auth-forbidden"), "authentication as anonymous forbidden")
if type(authKey) not in [str, unicode, types.NoneType]:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "authentication key must be a string (was %s)" % str(type(authKey)))
d = maybeDeferred(self.getAuthSecret, authKey)
def onGetAuthSecretOk(authSecret, authKey, extra):
if authKey is not None and authSecret is None:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "no-such-authkey"), "authentication key '%s' does not exist." % authKey)
## each authentication request gets a unique authid, which can only be used (later) once!
##
authid = newid()
## create authentication challenge
##
info = {}
info['authid'] = authid
info['authkey'] = authKey
info['timestamp'] = utcnow()
info['sessionid'] = self.session_id
info['extra'] = extra
pp = maybeDeferred(self.getAuthPermissions, authKey, extra)
def onAuthPermissionsOk(res):
if res is None:
res = {'permissions': {}}
res['permissions'] = {'pubsub': [], 'rpc': []}
info['permissions'] = res['permissions']
if 'authextra' in res:
info['authextra'] = res['authextra']
if authKey:
## authenticated session
##
infoser = self.factory._serialize(info)
sig = self.authSignature(infoser, authSecret)
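                    ## remember pending auth as (challenge info, expected signature, permissions)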
self._clientPendingAuth = (info, sig, res)
return infoser
else:
## anonymous session
##
self._clientPendingAuth = (info, None, res)
return None
def onAuthPermissionsError(e):
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "auth-permissions-error"), str(e))
pp.addCallbacks(onAuthPermissionsOk, onAuthPermissionsError)
return pp
d.addCallback(onGetAuthSecretOk, authKey, extra)
return d
@exportRpc("auth")
def auth(self, signature = None):
"""
RPC endpoint for clients to actually authenticate after requesting authentication and computing
a signature from the authentication challenge.
:param signature: Authentication signature computed by the client.
:type signature: str
:returns list -- A list of permissions the client is granted when authentication was successful.
"""
## check authentication state
##
if self._clientAuthenticated:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "already-authenticated"), "already authenticated")
if self._clientPendingAuth is None:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "no-authentication-requested"), "no authentication previously requested")
## check signature
##
if type(signature) not in [str, unicode, types.NoneType]:
raise Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-argument"), "signature must be a string or None (was %s)" % str(type(signature)))
if self._clientPendingAuth[1] != signature:
## delete pending authentication, so that no retries are possible. authid is only valid for 1 try!!
##
self._clientPendingAuth = None
## notify the client of failed authentication, but only after a random,
## exponentially distributed delay. this (further) protects against
## timing attacks
##
d = Deferred()
def fail():
## FIXME: (optionally) drop the connection instead of returning RPC error?
##
d.errback(Exception(self.shrink(WampProtocol.URI_WAMP_ERROR + "invalid-signature"), "signature for authentication request is invalid"))
failDelaySecs = random.expovariate(1.0 / 0.8) # mean = 0.8 secs
self.factory.reactor.callLater(failDelaySecs, fail)
return d
## at this point, the client has successfully authenticated!
## get the permissions we determined earlier
##
perms = self._clientPendingAuth[2]
## delete auth request and mark client as authenticated
##
authKey = self._clientPendingAuth[0]['authkey']
self._clientAuthenticated = True
self._clientPendingAuth = None
if self._clientAuthTimeoutCall is not None:
self._clientAuthTimeoutCall.cancel()
self._clientAuthTimeoutCall = None
## fire authentication callback
##
self.onAuthenticated(authKey, perms)
## return permissions to client
##
return perms['permissions']
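## Illustrative sketch (not part of the original source): a minimal derived
## authenticating server as described in the WampCraServerProtocol docstring.
## The user database, secret and URIs below are made-up placeholders.
class ExampleCraServerProtocol(WampCraServerProtocol):

    ## static demo credentials - purely for illustration
    USERDB = {'joe': 'secret'}

    def getAuthSecret(self, authKey):
        ## returning None for unknown keys makes authreq fail with "no-such-authkey"
        return self.USERDB.get(authKey, None)

    def getAuthPermissions(self, authKey, authExtra):
        ## grant one callable RPC endpoint and one prefix-matched PubSub topic,
        ## following the permissions object structure documented on getAuthPermissions
        return {'permissions': {
                   'rpc': [{'uri': 'http://example.com/api#add',
                            'call': True}],
                   'pubsub': [{'uri': 'http://example.com/topics/',
                               'prefix': True,
                               'pub': True,
                               'sub': True}]}}

    def onAuthenticated(self, authKey, permissions):
        ## register the granted PubSub topics; RPC endpoints could be registered here too
        self.registerForPubSubFromPermissions(permissions)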
class Call:
"""
Thin-wrapper for incoming RPCs provided to call handlers registered via
- registerHandlerMethodForRpc
- registerHandlerProcedureForRpc
"""
def __init__(self,
proto,
callid,
uri,
args,
extra = None):
self.proto = proto
self.callid = callid
self.uri = uri
self.args = args
self.extra = extra
self.timings = None
class Handler(object):
"""
A handler for a certain class of messages.
"""
typeid = None
def __init__(self, proto, prefixes):
"""
Remember protocol and prefix map in instance variables.
"""
self.proto = proto
self.prefixes = prefixes
def handleMessage(self, msg_parts):
"""
Template method for handling a message.
Check if the correct handler for the message type was
called. Afterwards, assign all relevant parts of the message to
instance variables and call the (overridden) method
_handleMessage to actually handle the message.
"""
msgtype = msg_parts[0]
if self.typeid:
assert msgtype == self.typeid, \
"Message type %s does not match type id %s" % (msgtype,
self.typeid)
else:
assert False, \
"No typeid defined for %s" % self.__class__.__name__
if self._messageIsValid(msg_parts):
self._parseMessageParts(msg_parts)
self._handleMessage()
def _parseMessageParts(self, msg_parts):
"""
Assign the message parts to instance variables.
Has to be overridden in subclasses.
"""
raise NotImplementedError
def _messageIsValid(self, msg_parts):
"""
Check if the message parts have expected properties (type, etc.).
Has to be overridden in subclasses.
"""
raise NotImplementedError
def _handleMessage(self):
"""
Handle a specific kind of message.
Has to be overridden in subclasses.
"""
raise NotImplementedError
def maybeTrackTimings(self, call, msg):
"""
Track timings, if desired.
"""
if self.proto.trackTimings:
self.proto.doTrack(msg)
call.timings = self.proto.trackedTimings
self.proto.trackedTimings = Timings()
class CallHandler(Handler):
"""
A handler for incoming RPC calls.
"""
typeid = WampProtocol.MESSAGE_TYPEID_CALL
def _messageIsValid(self, msg_parts):
callid, uri = msg_parts[1:3]
if not isinstance(callid, (str, unicode)):
self.proto._protocolError(
("WAMP CALL message with invalid type %s for "
"<callid>") % type(callid))
return False
if not isinstance(uri, (str, unicode)):
self.proto._protocolError(
("WAMP CALL message with invalid type %s for "
"<uri>") % type(uri))
return False
return True
def _parseMessageParts(self, msg_parts):
"""
Parse message and create call object.
"""
self.callid = msg_parts[1]
self.uri = self.prefixes.resolveOrPass(msg_parts[2]) ### PFX - remove
self.args = msg_parts[3:]
def _handleMessage(self):
"""
Perform the RPC call and attach callbacks to its deferred object.
"""
call = self._onBeforeCall()
## execute incoming RPC
d = maybeDeferred(self._callProcedure, call)
## register callback and errback with extra argument call
d.addCallbacks(self._onAfterCallSuccess,
self._onAfterCallError,
callbackArgs = (call,),
errbackArgs = (call,))
def _onBeforeCall(self):
"""
Create call object to move around call data
"""
uri, args = self.proto.onBeforeCall(self.callid, self.uri, self.args, bool(self.proto.procForUri(self.uri)))
call = Call(self.proto, self.callid, uri, args)
self.maybeTrackTimings(call, "onBeforeCall")
return call
def _callProcedure(self, call):
"""
Actually performs the call of a procedure invoked via RPC.
"""
m = self.proto.procForUri(call.uri)
if m is None:
raise Exception(WampProtocol.URI_WAMP_ERROR_NO_SUCH_RPC_ENDPOINT, "No RPC endpoint registered for %s." % call.uri)
obj, method_or_proc, is_handler = m[:3]
if not is_handler:
return self._performProcedureCall(call, obj, method_or_proc)
else:
call.extra = m[3]
return self._delegateToRpcHandler(call, obj, method_or_proc)
def _performProcedureCall(self, call, obj, method_or_proc):
"""
Perform a RPC method / procedure call.
"""
cargs = tuple(call.args) if call.args else ()
if obj:
## call object method
return method_or_proc(obj, *cargs)
else:
## call free-standing function/procedure
return method_or_proc(*cargs)
def _delegateToRpcHandler(self, call, obj, method_or_proc):
"""
Delegate call to RPC handler.
"""
if obj:
## call RPC handler on object
return method_or_proc(obj, call)
else:
## call free-standing RPC handler
return method_or_proc(call)
def _onAfterCallSuccess(self, result, call):
"""
Execute custom success handler and send call result.
"""
## track timing and fire user callback
self.maybeTrackTimings(call, "onAfterCallSuccess")
call.result = self.proto.onAfterCallSuccess(result, call)
## send out WAMP message
self._sendCallResult(call)
def _onAfterCallError(self, error, call):
"""
Execute custom error handler and send call error.
"""
## track timing and fire user callback
self.maybeTrackTimings(call, "onAfterCallError")
call.error = self.proto.onAfterCallError(error, call)
## send out WAMP message
self._sendCallError(call)
def _sendCallResult(self, call):
"""
Marshal and send a RPC success result.
"""
msg = [WampProtocol.MESSAGE_TYPEID_CALL_RESULT, call.callid, call.result]
try:
rmsg = self.proto.serializeMessage(msg)
except:
raise Exception("call result not JSON serializable")
else:
## now actually send WAMP message
self.proto.sendMessage(rmsg)
## track timing and fire user callback
self.maybeTrackTimings(call, "onAfterSendCallSuccess")
self.proto.onAfterSendCallSuccess(rmsg, call)
def _sendCallError(self, call):
"""
Marshal and send a RPC error result.
"""
killsession = False
rmsg = None
try:
error_info, killsession = self._extractErrorInfo(call)
rmsg = self._assembleErrorMessage(call, *error_info)
except Exception as e:
rmsg = self._handleProcessingError(call, e)
finally:
if rmsg:
## now actually send WAMP message
self.proto.sendMessage(rmsg)
## track timing and fire user callback
self.maybeTrackTimings(call, "onAfterSendCallError")
self.proto.onAfterSendCallError(rmsg, call)
if killsession:
self.proto.sendClose(3000, "killing WAMP session upon request by application exception")
else:
raise Exception("fatal: internal error in CallHandler._sendCallError")
def _extractErrorInfo(self, call):
"""
Extract error information from the call.
"""
## get error args and len
##
eargs = call.error.value.args
nargs = len(eargs)
if nargs > 4:
raise Exception("invalid args length %d for exception" % nargs)
## erroruri & errordesc
##
if nargs == 0:
erroruri = WampProtocol.URI_WAMP_ERROR_GENERIC
errordesc = WampProtocol.DESC_WAMP_ERROR_GENERIC
elif nargs == 1:
erroruri = WampProtocol.URI_WAMP_ERROR_GENERIC
errordesc = eargs[0]
else:
erroruri = eargs[0]
errordesc = eargs[1]
## errordetails
##
errordetails = None
if nargs >= 3:
errordetails = eargs[2]
elif self.proto.includeTraceback:
try:
## we'd like to do ..
#tb = call.error.getTraceback()
## .. but the implementation in Twisted
## http://twistedmatrix.com/trac/browser/tags/releases/twisted-13.1.0/twisted/python/failure.py#L529
## uses cStringIO which cannot handle Unicode string in tracebacks. Hence we do our own:
io = StringIO.StringIO()
call.error.printTraceback(file = io)
tb = io.getvalue()
except Exception as ie:
print("INTERNAL ERROR [_extractErrorInfo / getTraceback()]: %s" % ie)
traceback.print_stack()
else:
errordetails = tb.splitlines()
## killsession
##
killsession = False
if nargs >= 4:
killsession = eargs[3]
## recheck all error component types
##
if type(erroruri) not in [str, unicode]:
raise Exception("invalid type %s for errorUri" % type(erroruri))
if type(errordesc) not in [str, unicode]:
raise Exception("invalid type %s for errorDesc" % type(errordesc))
## errordetails must be JSON serializable. If not, we get exception later in sendMessage.
## We don't check here, since the only way would be to serialize to JSON and
## then we'd serialize twice (here and in sendMessage)
if type(killsession) not in [bool, types.NoneType]:
raise Exception("invalid type %s for killSession" % type(killsession))
return (erroruri, errordesc, errordetails), killsession
def _assembleErrorMessage(self, call, erroruri, errordesc, errordetails):
"""
Assemble a WAMP RPC error message.
"""
if errordetails is not None:
msg = [WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
call.callid,
self.prefixes.shrink(erroruri), ### PFX - remove
errordesc,
errordetails]
else:
msg = [WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
call.callid,
self.prefixes.shrink(erroruri), ### PFX - remove
errordesc]
## serialize message. this can fail if errorDetails is not
## serializable
try:
rmsg = self.proto.serializeMessage(msg)
except Exception as e:
raise Exception(
"invalid object for errorDetails - not serializable (%s)" %
str(e))
return rmsg
def _handleProcessingError(self, call, e):
"""
Create a message describing what went wrong during processing an
exception.
"""
msg = [WampProtocol.MESSAGE_TYPEID_CALL_ERROR,
call.callid,
### PFX - remove
self.prefixes.shrink(WampProtocol.URI_WAMP_ERROR_INTERNAL),
str(e)]
if self.proto.includeTraceback:
try:
tb = call.error.getTraceback()
except Exception as ie:
## FIXME: find out why this can fail with
## "'unicode' does not have the buffer interface"
print("INTERNAL ERROR (getTraceback): %s" % ie)
else:
msg.append(tb.splitlines())
result = self.proto.serializeMessage(msg)
return result
class CallResultHandler(Handler):
"""
    A handler for RPC call results.
"""
typeid = WampProtocol.MESSAGE_TYPEID_CALL_RESULT
def _messageIsValid(self, msg_parts):
if len(msg_parts) < 2:
self.proto._protocolError(
"WAMP CALL_RESULT message without <callid>")
return False
if len(msg_parts) != 3:
self.proto._protocolError(
"WAMP CALL_RESULT message with invalid length %d" % len(msg_parts))
return False
if type(msg_parts[1]) not in [unicode, str]:
self.proto._protocolError(
("WAMP CALL_RESULT message with invalid type %s for "
"<callid>") % type(msg_parts[1]))
return False
return True
def _parseMessageParts(self, msg_parts):
"""
Extract call result from message parts.
"""
self.callid = str(msg_parts[1])
self.result = msg_parts[2]
def _handleMessage(self):
## Pop and process Call Deferred
##
d = self.proto.calls.pop(self.callid, None)
if d:
## WAMP CALL_RESULT
##
d.callback(self.result)
else:
if self.proto.debugWamp:
log.msg("callid not found for received call result message")
class CallErrorHandler(Handler):
typeid = WampProtocol.MESSAGE_TYPEID_CALL_ERROR
def _messageIsValid(self, msg_parts):
if len(msg_parts) not in [4, 5]:
self.proto._protocolError(
"call error message invalid length %d" % len(msg_parts))
return False
## Error URI
##
if type(msg_parts[2]) not in [unicode, str]:
self.proto._protocolError(
"invalid type %s for errorUri in call error message" %
str(type(msg_parts[2])))
return False
## Error Description
##
if type(msg_parts[3]) not in [unicode, str]:
self.proto._protocolError(
"invalid type %s for errorDesc in call error message" %
str(type(msg_parts[3])))
return False
return True
def _parseMessageParts(self, msg_parts):
"""
Extract error information from message parts.
"""
self.callid = str(msg_parts[1])
self.erroruri = str(msg_parts[2])
self.errordesc = str(msg_parts[3])
## Error Details
##
if len(msg_parts) > 4:
self.errordetails = msg_parts[4]
else:
self.errordetails = None
def _handleMessage(self):
"""
Fire Call Error Deferred.
"""
##
## Pop and process Call Deferred
d = self.proto.calls.pop(self.callid, None)
if d:
e = Exception()
e.args = (self.erroruri, self.errordesc, self.errordetails)
d.errback(e)
else:
if self.proto.debugWamp:
log.msg("callid not found for received call error message")
|
apache-2.0
|
routeflow/AutomaticConfigurationRouteFlow
|
pox/tests/unit/openflow/libopenflow_01_test.py
|
23
|
16017
|
#!/usr/bin/env python
import unittest
import sys
import os.path
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.openflow.libopenflow_01 import *
from pox.datapaths.switch import *
def extract_num(buf, start, length):
""" extracts a number from a raw byte string. Assumes network byteorder """
# note: purposefully does /not/ use struct.unpack, because that is used by the code we validate
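  # e.g. extract_num("\x00\x01\x02\x03", start=1, length=2) == 0x0102 == 258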
val = 0
for i in range(start, start+length):
val <<= 8
val += ord(buf[i])
return val
class ofp_match_test(unittest.TestCase):
def test_bit_wildcards(self):
""" some checking of the bit-level wildcard magic in ofp_match"""
m = ofp_match()
# all match entries should start out as wildcarded
for k,v in ofp_match_data.iteritems():
self.assertEquals(getattr(m, k), None, "Attr %s should be wildcarded and reported as None" % k)
self.assertEquals(m.wildcards & v[1], v[1])
# try setting and unsetting specific bit-level match entries
for change in [ ("in_port", 1, OFPFW_IN_PORT), ("dl_vlan", 2, OFPFW_DL_VLAN), ("tp_dst", 22, OFPFW_TP_DST) ]:
setattr(m, change[0], change[1])
self.assertEquals(getattr(m, change[0]), change[1], "Attr %s should have been set to %s" % change[0:2])
self.assertEquals(m.wildcards & change[2], 0, "with %s set to %s, wildcard bit %x should get unset" % change)
setattr(m, change[0], None)
self.assertEquals(m.wildcards & change[2], change[2], "with %s reset from %s, wildcard bit %x should be set again" % change)
def test_ip_wildcard_magic(self):
""" ofp_match: check IP wildcard magic"""
# do this for both nw_src and nw_dst
for (attr, bitmask, shift) in ( ("nw_src", OFPFW_NW_SRC_MASK, OFPFW_NW_SRC_SHIFT), ( "nw_dst", OFPFW_NW_DST_MASK, OFPFW_NW_DST_SHIFT) ):
m = ofp_match()
self.assertEquals(getattr(m, "get_"+attr)(), (None, 0), "get_%s for unset %s should return (None,0)" % (attr, attr))
self.assertTrue( ((m.wildcards & bitmask) >> shift) >= 32)
# set a bunch of ip addresses with or without networks
for ipnet in ( "10.0.0.0/8", "172.16.0.0/16", "192.168.24.0/24", "1.2.3.4/30", "212.11.225.3"):
parts = ipnet.split("/")
ip = parts[0]
bits = int(parts[1]) if len(parts)>1 else 32
# set the IP address
setattr(m, attr, ipnet)
# gets converted to just the ip address during query
self.assertEqual(getattr(m, attr), ip)
# the get_#{attr} method gives a tuple of (ip, cidr-bits)
self.assertEqual( getattr(m, "get_"+attr)(), (ip, bits))
# the appropriate bits in the wildcard should be set
self.assertEqual( (m.wildcards & bitmask) >> shift, 32-bits)
# reset to 0.0.0.0/0 results in full wildcard
setattr(m, attr, "0.0.0.0/0")
self.assertEquals(getattr(m, "get_"+attr)(), (None, 0), "get_%s for unset %s should return (None,0)" % (attr, attr))
self.assertTrue( ((m.wildcards & bitmask) >> shift) >= 32)
def test_match_with_wildcards(self):
""" ofp_match: test the matches_with_wildcards method """
def create(wildcards=(), **kw):
m = ofp_match(in_port=1, dl_type=0, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02"), dl_vlan=5, nw_proto=6, nw_src="10.0.0.1", nw_dst="11.0.0.1", tp_src = 12345, tp_dst=80)
if isinstance(wildcards, str):
wildcards = [wildcards]
for w in wildcards:
setattr(m, w, None)
for (k,v) in kw.iteritems():
m.__setattr__(k,v)
return m
def assertMatch(ref, other, msg=""):
self.assertTrue(ref.matches_with_wildcards(other), "%s - %s should match %s " % (msg, ref.show(), other.show()))
def assertNoMatch(ref, other, msg=""):
self.assertFalse(ref.matches_with_wildcards(other), "%s - %s should NOT match %s " % (msg, ref.show(), other.show()))
ref = create()
#print ref
# same instances match
assertMatch(ref, ref)
# equal instances match
assertMatch(ref, create())
# ofp_match with additional wildcard bits set match the ref, but not the other way round
for wildcards in ( [ "in_port" ], [ "dl_vlan" ], [ "dl_src", "dl_dst" ] ):
wilder = create(wildcards=wildcards)
assertMatch(wilder, ref)
assertNoMatch(ref, wilder)
# when fields are wildcarded, we can change around the actual values and it will still match
for changes in ( { "in_port": 15 }, { "dl_src": "12:34:56:78:90:ab", "dl_vlan": 7 }, { "tp_dst" : 22 } ):
wild = create()
concrete = create()
for (k,v) in changes.iteritems():
setattr(wild, k, None)
setattr(concrete, k, v)
assertMatch(wild, concrete)
assertNoMatch(concrete, wild)
# play around with nw src addresses
assertMatch(create(nw_src="10.0.0.0/24"), ref)
assertMatch(create(nw_src="10.0.0.0/24"), create(nw_src="10.0.0.0/25"))
assertNoMatch(create(nw_src="10.0.0.0/25"), create(nw_src="10.0.0.0/24"))
assertMatch(create(nw_src="10.0.0.0/25"), create(nw_src="10.0.0.127"))
assertNoMatch(create(nw_src="10.0.0.0/25"), create(nw_src="10.0.0.128"))
class ofp_command_test(unittest.TestCase):
# custom map of POX class to header type, for validation
ofp_type = {
ofp_features_reply: OFPT_FEATURES_REPLY,
ofp_set_config: OFPT_SET_CONFIG,
ofp_flow_mod: OFPT_FLOW_MOD,
ofp_port_mod: OFPT_PORT_MOD,
ofp_queue_get_config_request: OFPT_QUEUE_GET_CONFIG_REQUEST,
ofp_queue_get_config_reply: OFPT_QUEUE_GET_CONFIG_REPLY,
ofp_stats_request: OFPT_STATS_REQUEST,
ofp_stats_reply: OFPT_STATS_REPLY,
ofp_packet_out: OFPT_PACKET_OUT,
ofp_barrier_reply: OFPT_BARRIER_REPLY,
ofp_barrier_request: OFPT_BARRIER_REQUEST,
ofp_packet_in: OFPT_PACKET_IN,
ofp_flow_removed: OFPT_FLOW_REMOVED,
ofp_port_status: OFPT_PORT_STATUS,
ofp_error: OFPT_ERROR,
ofp_hello: OFPT_HELLO,
ofp_echo_request: OFPT_ECHO_REQUEST,
ofp_echo_reply: OFPT_ECHO_REPLY,
ofp_vendor_generic: OFPT_VENDOR,
ofp_features_request: OFPT_FEATURES_REQUEST,
ofp_get_config_request: OFPT_GET_CONFIG_REQUEST,
ofp_get_config_reply: OFPT_GET_CONFIG_REPLY,
ofp_set_config: OFPT_SET_CONFIG
}
def assert_packed_header(self, pack, ofp_type, length, xid):
""" check openflow header fields in packed byte array """
def assert_num(name, start, length, expected):
val = extract_num(pack, start, length)
self.assertEquals(val, expected, "packed header check: %s for ofp type %s should be %d (is %d)" % (name, ofp_type_map[ofp_type], expected, val))
assert_num("OpenFlow version", 0, 1, 1)
assert_num("header_type", 1, 1, ofp_type)
assert_num("length in header", 2, 2, length)
assert_num("xid", 4, 4, xid)
def _test_pack_unpack(self, o, xid, ofp_type=None):
""" check that packing and unpacking an ofp object works, and that lengths etc. are correct """
    show = lambda(o): o.show() if hasattr(o, "show") else str(o)
if not ofp_type:
ofp_type = self.ofp_type[type(o)]
self.assertTrue(o._assert(), "pack_unpack for %s -- original object should _assert to true"%show(o))
# show the object to make sure that works
o.show()
# pack object
pack = o.pack()
# byte array length should equal calculated length
self.assertEqual(len(o), len(pack), "pack_unpack for %s -- len(object)=%d != len(packed)=%d" % (type(o), len(o), len(pack)))
# check header fields in packed byte array
self.assert_packed_header(pack, ofp_type, len(o), xid)
# now unpack
unpacked = type(o)()
unpacked.unpack(pack)
self.assertEqual(o, unpacked, "pack_unpacked -- original != unpacked\n===Original:\n%s\n===Repacked:%s\n" % (show(o), show(unpacked)))
return unpacked
def test_header_pack_unpack(self):
for kw in ( { "header_type": OFPT_PACKET_OUT, "xid": 1 },
{ "header_type": OFPT_FLOW_MOD, "xid": 2 }):
# Can't directly pack a header, since it has no length...
class H (ofp_header):
def __len__ (self):
return 8
o = H(**kw)
self._test_pack_unpack(o, kw["xid"], kw["header_type"])
def test_pack_all_comands_simple(self):
xid_gen = xid_generator()
for cls in ( ofp_features_reply,
ofp_set_config,
ofp_get_config_reply,
ofp_flow_mod,
ofp_port_mod,
ofp_queue_get_config_request,
ofp_queue_get_config_reply,
ofp_stats_request,
ofp_stats_reply,
ofp_packet_out,
ofp_barrier_reply,
ofp_barrier_request,
ofp_packet_in,
ofp_flow_removed,
ofp_port_status,
ofp_error,
ofp_hello,
ofp_echo_request,
ofp_echo_reply,
ofp_features_request,
ofp_get_config_request,
ofp_get_config_reply,
ofp_set_config ):
xid = xid_gen()
args = {}
# Customize initializer
if cls is ofp_stats_reply:
args['body'] = ofp_desc_stats(sw_desc="POX")
elif cls is ofp_stats_request:
args['body'] = ofp_vendor_stats_generic(vendor=0xcafe)
o = cls(xid=xid, **args)
self._test_pack_unpack(o, xid)
out = ofp_action_output
dl_addr = ofp_action_dl_addr
some_actions = ([], [out(port=2)], [out(port=2), out(port=3)], [ out(port=OFPP_FLOOD) ], [ dl_addr.set_dst(EthAddr("00:"*5 + "01")), out(port=1) ])
def test_pack_custom_packet_out(self):
xid_gen = xid_generator()
packet = ethernet(src=EthAddr("00:00:00:00:00:01"), dst=EthAddr("00:00:00:00:00:02"),
payload=ipv4(srcip=IPAddr("1.2.3.4"), dstip=IPAddr("1.2.3.5"),
payload=udp(srcport=1234, dstport=53, payload="haha"))).pack()
for actions in self.some_actions:
for attrs in ( { 'data': packet }, { 'buffer_id': 5 } ):
xid = xid_gen()
o = ofp_packet_out(xid=xid, actions=actions, **attrs)
self._test_pack_unpack(o, xid, OFPT_PACKET_OUT)
def test_pack_flow_mod_openflow_dl_type_wildcards(self):
""" Openflow 1.1 spec clarifies that wildcards should not be set when the protocol in
question is not matched i.e., dl_type != 0x800 -> no wildcards for IP.
Test this here """
def show_wildcards(w):
parts = [ k.lower()[len("OFPFW_"):] for (k,v) in ofp_flow_wildcards_rev_map.iteritems() if v & w == v ]
      nw_src_bits = (w & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT
if(nw_src_bits > 0): parts.append("nw_src(/%d)" % (32 - nw_src_bits))
nw_dst_bits = (w & OFPFW_NW_DST_MASK) >> OFPFW_NW_DST_SHIFT
if(nw_dst_bits > 0): parts.append("nw_dst(/%d)" % (32 - nw_dst_bits))
return "|".join(parts)
def test_wildcards(match, expected):
(packed,) = struct.unpack_from("!L", match.pack(flow_mod=True))
self.assertEquals(packed, expected, "packed: %s <> expected: %s" % (show_wildcards(packed), show_wildcards(expected)))
# no dl type specified -> wildcards for nw/dl are cleared
test_wildcards(ofp_match(), OFPFW_ALL & ~ (OFPFW_NW_TOS | OFPFW_NW_PROTO | OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK | OFPFW_TP_SRC | OFPFW_TP_DST))
all_normalized = (OFPFW_ALL & ~ (OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK)) | \
OFPFW_NW_SRC_ALL | OFPFW_NW_DST_ALL
# dl type = ARP -> certain wildcards live
test_wildcards(ofp_match(dl_type=0x806), all_normalized & ~ (OFPFW_NW_TOS | OFPFW_TP_SRC | OFPFW_TP_DST | OFPFW_DL_TYPE))
# dl type = IP -> more wildcards live
test_wildcards(ofp_match(dl_type=0x800), all_normalized & ~ (OFPFW_TP_SRC | OFPFW_TP_DST | OFPFW_DL_TYPE))
    # dl type = IP, nw_proto = 6 (TCP) -> all wildcards live
test_wildcards(ofp_match(dl_type=0x800,nw_proto=6), all_normalized & ~(OFPFW_DL_TYPE | OFPFW_NW_PROTO))
def test_pack_custom_flow_mod(self):
out = ofp_action_output
xid_gen = xid_generator()
for match in ( ofp_match(),
ofp_match(in_port=1, dl_type=0x88cc, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02")),
ofp_match(in_port=1, dl_type=0x0806, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02"), nw_src="10.0.0.1", nw_dst="11.0.0.1"),
ofp_match(in_port=1, dl_type=0x0800, dl_src=EthAddr("00:00:00:00:00:01"), dl_dst=EthAddr("00:00:00:00:00:02"), dl_vlan=5, nw_proto=6, nw_src="10.0.0.1", nw_dst="11.0.0.1", tp_src = 12345, tp_dst=80)):
for actions in self.some_actions:
for command in ( OFPFC_ADD, OFPFC_DELETE, OFPFC_DELETE_STRICT, OFPFC_MODIFY_STRICT, OFPFC_MODIFY_STRICT ):
for attrs in ( {}, { 'buffer_id' : 123 }, { 'idle_timeout': 5, 'hard_timeout': 10 } ):
xid = xid_gen()
o = ofp_flow_mod(xid=xid, command=command, match = match, actions=actions, **attrs)
unpacked = self._test_pack_unpack(o, xid, OFPT_FLOW_MOD)
self.assertEqual(unpacked.match, match)
self.assertEqual(unpacked.command, command)
self.assertEqual(unpacked.actions, actions)
for (check_attr,val) in attrs.iteritems():
self.assertEqual(getattr(unpacked, check_attr), val)
class ofp_action_test(unittest.TestCase):
def assert_packed_action(self, cls, packed, a_type, length):
self.assertEqual(extract_num(packed, 0,2), a_type, "Action %s: expected type %d (but is %d)" % (cls, a_type, extract_num(packed, 0,2)))
self.assertEqual(extract_num(packed, 2,2), length, "Action %s: expected length %d (but is %d)" % (cls, length, extract_num(packed, 2,2)))
def test_pack_all_actions_simple(self):
def c(cls, a_type, kw, length):
action = cls(**kw)
packed = action.pack()
self.assertEqual(len(action), len(packed))
self.assert_packed_action(cls, packed, a_type, length)
unpacked = cls()
unpacked.unpack(packed)
self.assertEqual(action, unpacked)
for (k, v) in kw.iteritems():
self.assertEqual(getattr(unpacked, k), v)
return packed
c(ofp_action_output, OFPAT_OUTPUT, { 'port': 23 }, 8 )
c(ofp_action_enqueue, OFPAT_ENQUEUE, { 'port': 23, 'queue_id': 1 }, 16 )
c(ofp_action_vlan_vid, OFPAT_SET_VLAN_VID, { 'vlan_vid' : 123}, 8 )
c(ofp_action_vlan_pcp, OFPAT_SET_VLAN_PCP, { 'vlan_pcp' : 123}, 8 )
p = c(ofp_action_dl_addr.set_dst, OFPAT_SET_DL_DST, { 'dl_addr' : EthAddr("01:02:03:04:05:06").toRaw() }, 16 )
self.assertEquals(extract_num(p, 4,6), 0x010203040506)
p = c(ofp_action_dl_addr.set_src, OFPAT_SET_DL_SRC, { 'dl_addr' : EthAddr("ff:ee:dd:cc:bb:aa").toRaw() }, 16 )
self.assertEquals(extract_num(p, 4,6), 0xffeeddccbbaa, "Ethernet in packed is %x, but should be ff:ee:dd:cc:bb:aa" % extract_num(p, 4, 6))
p = c(ofp_action_nw_addr.set_dst, OFPAT_SET_NW_DST, { 'nw_addr' : IPAddr("1.2.3.4") }, 8 )
self.assertEquals(extract_num(p, 4,4), 0x01020304)
p = c(ofp_action_nw_addr.set_src, OFPAT_SET_NW_SRC, { 'nw_addr' : IPAddr("127.0.0.1") }, 8 )
self.assertEquals(extract_num(p, 4,4), 0x7f000001)
c(ofp_action_nw_tos, OFPAT_SET_NW_TOS, { 'nw_tos' : 4 }, 8)
p = c(ofp_action_tp_port.set_dst, OFPAT_SET_TP_DST, { 'tp_port' : 80 }, 8)
self.assertEquals(extract_num(p, 4,2), 80)
p = c(ofp_action_tp_port.set_src, OFPAT_SET_TP_SRC, { 'tp_port' : 22987 }, 8)
self.assertEquals(extract_num(p, 4,2), 22987)
# c(ofp_action_push_mpls, OFPAT_PUSH_MPLS, {'ethertype':0x8847}, 8)
# c(ofp_action_pop_mpls, OFPAT_POP_MPLS, {'ethertype':0x0800}, 8)
# c(ofp_action_mpls_dec_ttl, OFPAT_DEC_MPLS_TTL, {}, 8)
# c(ofp_action_mpls_label, OFPAT_SET_MPLS_LABEL, {'mpls_label': 0xa1f}, 8)
# c(ofp_action_mpls_tc, OFPAT_SET_MPLS_TC, {'mpls_tc': 0xac}, 8)
# c(ofp_action_mpls_ttl, OFPAT_SET_MPLS_TTL, {'mpls_ttl': 0xaf}, 8)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
Whatang/DrumBurp
|
src/Widgets/measureTabs_plugin.py
|
1
|
1611
|
# Copyright 2011-12 Michael Thomas
#
# See www.whatang.org for more information.
#
# This file is part of DrumBurp.
#
# DrumBurp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DrumBurp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DrumBurp. If not, see <http://www.gnu.org/licenses/>
'''
Created on 17 Apr 2011
@author: Mike Thomas
'''
from PyQt4.QtDesigner import QPyDesignerCustomWidgetPlugin
from .measureTabs import measureTabs
# IGNORE:interface-not-implemented
class measureTabs_plugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent=None):
super(measureTabs_plugin, self).__init__(parent)
self.initialized = False
def createWidget(self, parent):
widget = measureTabs(parent)
return widget
def name(self):
return "measureTabs"
def group(self):
return "DrumBurp Widgets"
def toolTip(self):
return ""
def whatsThis(self):
return ""
def isContainer(self):
return True
def domXml(self):
return '<widget class="measureTabs" name="measureTabs" />\n'
def includeFile(self):
return "Widgets.measureTabs_plugin"
|
gpl-3.0
|
simonvh/pyDNase
|
pyDNase/footprinting/__init__.py
|
1
|
10664
|
# Copyright (C) 2013 Jason Piper - [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from . import fastbinom as binom
from itertools import tee
import numpy.random
import numpy as np
import pyDNase
import warnings
class wellington(object):
def __init__(self, interval, reads,
shoulder_sizes=range(35,36), footprint_sizes = range(11,26,2), FDR=0, bonferroni = 0,):
self.interval = interval
#The footprint scores are calculated at instantiation.
self.scores, self.lengths = self.calculate(reads, shoulder_sizes,footprint_sizes,FDR, bonferroni)
def footprints(self, withCutoff=-30, merge = 1):
"""
        This returns a GenomicIntervalSet with the intervals scoring below the specified cutoff in the selected data
"""
        #This finds the positions of all the ranges below the cutoff
ranges = []
tempMLE, templogProb = np.copy(self.lengths), np.copy(self.scores)
while templogProb.min() < withCutoff:
minimapos = templogProb.argmin()
minimafplen = tempMLE[minimapos]
minimaphalffplen = int(minimafplen)/2
lbound = max(minimapos-minimaphalffplen,0)
rbound = min(minimapos+minimaphalffplen,len(templogProb))
ranges.append((lbound,rbound,templogProb.min()))
templogProb[lbound:rbound] = 1
tempMLE[lbound:rbound] = 1
returnSet = pyDNase.GenomicIntervalSet()
#Merges overlapping ranges (TODO: documentation)
if ranges:
if merge:
ranges = sorted(ranges)
merged_ranges = [ranges[0]]
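                #ranges are sorted by start; extend the previous footprint when the next
                #one overlaps it, keeping the better (more negative) score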
for c, d, e in ranges[1:]:
a, b, f = merged_ranges[-1]
if c<=b<d:
merged_ranges[-1] = a, d , min(e,f)
elif b<c<d:
merged_ranges.append((c,d,e))
else:
merged_ranges = ranges
            #Creates a GenomicIntervalSet and adds the footprints to it
for i in merged_ranges:
rstartbp = self.interval.startbp + i[0]
#We must add one to the end base of the footprint to account for the BED file format
rendbp = self.interval.startbp + i[1] + 1
region = pyDNase.GenomicInterval(self.interval.chromosome, rstartbp, rendbp, strand="+",score=i[2])
returnSet += region
return returnSet
def window(self, iterable, size):
"""
        Takes a list (iterable) and returns a list of rolling windows, each of length size.
>>> [i for i in window(range(0,12,2), 3)]
[(0, 2, 4), (2, 4, 6), (4, 6, 8), (6, 8, 10)]
"""
iters = tee(iterable, size)
for i in range(1, size):
for each in iters[i:]:
next(each, None)
return zip(*iters)
def calculate(self,reads,shoulder_sizes=range(35,36),footprint_sizes = range(11,26,2), FDR=0, bonferroni = 0):
#TODO: write docstring and doctest
        if self.interval.strand == "-":
warnings.warn("You're footprinting an interval on the reverse strand! "+
"You should be sure you know what you're doing as wellington was not designed for this! "+
"Ensure that all the intervals you provide to wellington are on the +ve strand for published behaviour",UserWarning)
cuts = reads[self.interval]
forwardArray, backwardArray = cuts["+"].tolist(), cuts["-"].tolist()
if FDR:
numpy.random.shuffle(forwardArray)
numpy.random.shuffle(backwardArray)
#Let's compute all the possible binding arrays, this really helps when iterating over multiple footprint sizes
fw_fpscores_dict = {}
rv_fpscores_dict = {}
for fp_size in footprint_sizes:
halffpround = int((fp_size-1)/2)
fw_fpscores_dict[fp_size] = [0] * halffpround + [sum(i) for i in self.window(forwardArray, fp_size)]
rv_fpscores_dict[fp_size] = [0] * halffpround + [sum(i) for i in self.window(backwardArray,fp_size)]
#Empty list of lists for storing the footprint scores
log_probs = [[] for i in range(len(forwardArray))]
if bonferroni:
bonferroni_factor = np.log(1.0/sum(reads.samfile.lengths))
#testing multiple background sizes
for shoulder_size in shoulder_sizes:
#This computes the background cut sums for the specified shoulder_size for all basepairs
f_bindingArray = [0] * (shoulder_size - 1) + [sum(i) for i in self.window(forwardArray,shoulder_size)]
b_bindingArray = [sum(i) for i in self.window(backwardArray,shoulder_size)] + [0] * (shoulder_size - 1)
for fp_size in footprint_sizes:
halffpround = int((fp_size-1)/2)
#This computes the binding cut sums for the specified fp_size for all basepairs
fw_fpscores = fw_fpscores_dict[fp_size]
rv_fpscores = rv_fpscores_dict[fp_size]
for i in range(shoulder_size+halffpround,len(forwardArray)-shoulder_size-halffpround):
xForward = f_bindingArray[i-halffpround-1]
nForward = xForward + fw_fpscores[i]
xBackward = b_bindingArray[i+halffpround+1]
nBackward = xBackward + rv_fpscores[i]
#This requires that there are DNase Cuts present on both strands
if xForward and xBackward:
#Null hypothesis for probability to randomly hit background
p = float(shoulder_size) / (shoulder_size + fp_size)
                        #This stores the P values and the fp_size used to calculate them as a tuple in the log_probs[] list
score = binom.logsf(int(xForward - 1), int(nForward), p) + binom.logsf(int(xBackward - 1), int(nBackward), p)
log_probs[i].append([score,fp_size])
#This iterates over all the base pairs in the region and creates arrays for the best score and footprint size
best_probabilities = []
best_footprintsizes = []
for i in range(len(forwardArray)):
if log_probs[i]:
best_params = min(log_probs[i])
#This catches anything which has floated to negative infinity - but it might not be the best way
best_score = max(-1000,best_params[0])
if bonferroni:
best_probabilities.append(min(0,best_score - bonferroni_factor))
else:
best_probabilities.append(best_score)
best_footprintsizes.append(best_params[1])
else:
best_probabilities.append(0)
best_footprintsizes.append(0)
return (np.array(best_probabilities), np.array(best_footprintsizes))
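#Illustrative sketch (not part of the original source): a minimal driver for the
#wellington class above. BAMHandler and GenomicInterval are assumed from the public
#pyDNase API; the BAM path and coordinates are made-up placeholders.
def _example_wellington_usage():
    reads = pyDNase.BAMHandler("example.bam")
    interval = pyDNase.GenomicInterval("chr1", 100000, 101000, strand="+")
    wf = wellington(interval, reads)
    #intervals scoring below the cutoff are returned as a GenomicIntervalSet
    return wf.footprints(withCutoff=-30)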
class wellington1D(wellington):
def calculate(self,reads,shoulder_sizes=range(35,36),footprint_sizes = range(11,26,2), FDR=0, bonferroni = 0):
#TODO: write docstring and doctest
#Here we use some precomputed sums to avoid multiple calculations
cuts = reads[self.interval]
forwardArray, backwardArray = cuts["+"], cuts["-"]
cutArray = (forwardArray + backwardArray).tolist()
if FDR:
numpy.random.shuffle(cutArray)
#Empty list of lists for storing the footprint scores
log_probs = [[] for i in range(len(cutArray))]
if bonferroni:
bonferroni_factor = np.log(1/float(sum(reads.samfile.lengths)))
for shoulder_size in shoulder_sizes:
#This computes the background cut sums for the specified shoulder_size for all basepairs
f_bindingArray = [0] * (shoulder_size - 1) + [sum(i) for i in self.window(cutArray,shoulder_size)]
b_bindingArray = [sum(i) for i in self.window(cutArray,shoulder_size)] + [0] * (shoulder_size - 1)
for fp_size in footprint_sizes:
halffpround = int((fp_size-1)/2)
#This computes the binding cut sums for the specified fp_size for all basepairs
fpscores = [0] * halffpround + [sum(i) for i in self.window(cutArray, fp_size)]
for i in range(shoulder_size+halffpround,len(forwardArray)-shoulder_size-halffpround):
xfor = f_bindingArray[i-halffpround-1]
xback = b_bindingArray[i+halffpround+1]
x = xfor + xback
n = x + fpscores[i]
#This requires that there are actually tags present in these windows
if x:
p = (shoulder_size*2) / float((shoulder_size*2) + fp_size)
                        #This stores the p values and the fp_size used to calculate them as a tuple in the log_probs[] list
score = binom.logsf(int(x - 1), int(n), p)
log_probs[i].append([score,fp_size])
#This iterates over all the base pairs in the region and creates arrays for the best score and footprint size
best_probabilities = []
best_footprintsizes = []
for i in range(len(cutArray)):
if log_probs[i]:
best_params = min(log_probs[i])
#This catches anything which has floated to negative infinity - but it might not be the best way
best_score = max(-1000,best_params[0])
if bonferroni:
best_probabilities.append(min(0,best_score - bonferroni_factor))
else:
best_probabilities.append(best_score)
best_footprintsizes.append(best_params[1])
else:
best_probabilities.append(0)
best_footprintsizes.append(0)
return (np.array(best_probabilities), np.array(best_footprintsizes))
|
gpl-3.0
|
gechong/XlsxWriter
|
xlsxwriter/test/comparison/test_hyperlink04.py
|
8
|
2663
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'hyperlink04.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet('Data Sheet')
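        # "internal:" URIs create hyperlinks to locations inside the workbook (sheets, ranges)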
worksheet1.write_url('A1', "internal:Sheet2!A1")
worksheet1.write_url('A3', "internal:Sheet2!A1:A5")
worksheet1.write_url('A5', "internal:'Data Sheet'!D5", None, 'Some text')
worksheet1.write_url('E12', "internal:Sheet1!J1")
worksheet1.write_url('G17', "internal:Sheet2!A1", None, 'Some text')
worksheet1.write_url('A18', "internal:Sheet2!A1", None, None, 'Tool Tip 1')
worksheet1.write_url('A20', "internal:Sheet2!A1", None, 'More text', 'Tool Tip 2')
workbook.close()
self.assertExcelEqual()
def test_create_file_write(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks with write()"""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet3 = workbook.add_worksheet('Data Sheet')
worksheet1.write('A1', "internal:Sheet2!A1")
worksheet1.write('A3', "internal:Sheet2!A1:A5")
worksheet1.write('A5', "internal:'Data Sheet'!D5", None, 'Some text')
worksheet1.write('E12', "internal:Sheet1!J1")
worksheet1.write('G17', "internal:Sheet2!A1", None, 'Some text')
worksheet1.write('A18', "internal:Sheet2!A1", None, None, 'Tool Tip 1')
worksheet1.write('A20', "internal:Sheet2!A1", None, 'More text', 'Tool Tip 2')
workbook.close()
self.assertExcelEqual()
|
bsd-2-clause
|
shinglyu/servo
|
tests/wpt/web-platform-tests/html/tools/update_html5lib_tests.py
|
88
|
5363
|
import sys
import os
import hashlib
import urllib
import itertools
import re
import json
import glob
import shutil
try:
import genshi
from genshi.template import MarkupTemplate
from html5lib.tests import support
except ImportError:
print """This script requires the Genshi templating library and html5lib source
It is recommended that these are installed in a virtualenv:
virtualenv venv
source venv/bin/activate
pip install genshi
cd venv
git clone [email protected]:html5lib/html5lib-python.git html5lib
cd html5lib
git submodule init
git submodule update
pip install -e ./
Then run this script again, with the virtual environment still active.
When you are done, type "deactivate" to deactivate the virtual environment.
"""
TESTS_PATH = "html/syntax/parsing/"
def get_paths():
script_path = os.path.split(os.path.abspath(__file__))[0]
repo_base = get_repo_base(script_path)
tests_path = os.path.join(repo_base, TESTS_PATH)
return script_path, tests_path
def get_repo_base(path):
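    # walk up the directory tree until a directory containing ".git" is found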
while path:
if os.path.exists(os.path.join(path, ".git")):
return path
else:
path = os.path.split(path)[0]
def get_expected(data):
data = "#document\n" + data
return data
def get_hash(data, container=None):
    if container is None:
container = ""
return hashlib.sha1("#container%s#data%s"%(container.encode("utf8"),
data.encode("utf8"))).hexdigest()
def make_tests(script_dir, out_dir, input_file_name, test_data):
tests = []
innerHTML_tests = []
ids_seen = {}
print input_file_name
for test in test_data:
if "script-off" in test:
continue
is_innerHTML = "document-fragment" in test
data = test["data"]
container = test["document-fragment"] if is_innerHTML else None
assert test["document"], test
expected = get_expected(test["document"])
test_list = innerHTML_tests if is_innerHTML else tests
test_id = get_hash(data, container)
if test_id in ids_seen:
print "WARNING: id %s seen multiple times in file %s this time for test (%s, %s) before for test %s, skipping"%(test_id, input_file_name, container, data, ids_seen[test_id])
continue
ids_seen[test_id] = (container, data)
test_list.append({'string_uri_encoded_input':"\"%s\""%urllib.quote(data.encode("utf8")),
'input':data,
'expected':expected,
'string_escaped_expected':json.dumps(urllib.quote(expected.encode("utf8"))),
'id':test_id,
'container':container
})
path_normal = None
if tests:
path_normal = write_test_file(script_dir, out_dir,
tests, "html5lib_%s"%input_file_name,
"html5lib_test.xml")
path_innerHTML = None
if innerHTML_tests:
path_innerHTML = write_test_file(script_dir, out_dir,
innerHTML_tests, "html5lib_innerHTML_%s"%input_file_name,
"html5lib_test_fragment.xml")
return path_normal, path_innerHTML
def write_test_file(script_dir, out_dir, tests, file_name, template_file_name):
file_name = os.path.join(out_dir, file_name + ".html")
short_name = os.path.split(file_name)[1]
with open(os.path.join(script_dir, template_file_name), "r") as f:
template = MarkupTemplate(f)
stream = template.generate(file_name=short_name, tests=tests)
with open(file_name, "w") as f:
f.write(stream.render('html', doctype='html5',
encoding="utf8"))
return file_name
def escape_js_string(in_data):
return in_data.encode("utf8").encode("string-escape")
def serialize_filenames(test_filenames):
return "[" + ",\n".join("\"%s\""%item for item in test_filenames) + "]"
def main():
script_dir, out_dir = get_paths()
test_files = []
inner_html_files = []
if len(sys.argv) > 2:
test_iterator = itertools.izip(
itertools.repeat(False),
sorted(os.path.abspath(item) for item in
glob.glob(os.path.join(sys.argv[2], "*.dat"))))
else:
test_iterator = itertools.chain(
itertools.izip(itertools.repeat(False),
sorted(support.get_data_files("tree-construction"))),
itertools.izip(itertools.repeat(True),
sorted(support.get_data_files(
os.path.join("tree-construction", "scripted")))))
for (scripted, test_file) in test_iterator:
input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
if scripted:
input_file_name = "scripted_" + input_file_name
test_data = support.TestData(test_file)
test_filename, inner_html_file_name = make_tests(script_dir, out_dir,
input_file_name, test_data)
if test_filename is not None:
test_files.append(test_filename)
if inner_html_file_name is not None:
inner_html_files.append(inner_html_file_name)
if __name__ == "__main__":
main()
|
mpl-2.0
|
TaizoAyase/FSECplotter2
|
test/pyqt/dialogs/test_integrator_dialog.py
|
1
|
3057
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from glob import glob
import unittest
from unittest import TestCase
from nose.tools import ok_, eq_, raises
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
from FSECplotter import *
from FSECplotter.pyqt.models import LogfileModel
from FSECplotter.pyqt.dialogs import *
testfiles = glob('./test/fixture/shimadzu/test*.txt')
app = QtWidgets.QApplication(sys.argv)
class IntegratorDialogTestCase(TestCase):
def setUp(self):
self.model = LogfileModel(0, 6, None)
for i, f in enumerate(testfiles):
self.model.add_item(f)
# select Detector B and channel 2
self.model.item(i, 3).setText("B")
self.model.item(i, 4).setText("2")
self.form = IntegratorDialog(self.model)
def reset_form_empty(self):
self.form.ui.lineEdit.setText("")
self.form.ui.lineEdit_2.setText("")
self.form.integrate_accepted = False
def test_defaults(self):
eq_(self.form.ui.lineEdit.text(), "")
eq_(self.form.ui.lineEdit_2.text(), "")
def test_min_max_volume_inverse(self):
self.reset_form_empty()
self.form.ui.lineEdit.setText("2")
self.form.ui.lineEdit_2.setText("1")
okWidget = self.form.ui.buttonBox.button(self.form.ui.buttonBox.Ok)
QTest.mouseClick(okWidget, Qt.LeftButton)
ok_(not self.form.integrate_accepted)
def test_min_volume_empty(self):
self.reset_form_empty()
self.form.ui.lineEdit_2.setText("1")
okWidget = self.form.ui.buttonBox.button(self.form.ui.buttonBox.Ok)
QTest.mouseClick(okWidget, Qt.LeftButton)
ok_(not self.form.integrate_accepted)
def test_max_volume_empty(self):
self.reset_form_empty()
self.form.ui.lineEdit.setText("1")
okWidget = self.form.ui.buttonBox.button(self.form.ui.buttonBox.Ok)
QTest.mouseClick(okWidget, Qt.LeftButton)
ok_(not self.form.integrate_accepted)
def test_volume_empty(self):
self.reset_form_empty()
okWidget = self.form.ui.buttonBox.button(self.form.ui.buttonBox.Ok)
QTest.mouseClick(okWidget, Qt.LeftButton)
ok_(not self.form.integrate_accepted)
def test_accept(self):
self.reset_form_empty()
self.form.ui.lineEdit.setText("0")
self.form.ui.lineEdit_2.setText("30")
# prevent showing the warning dialog
ok_button = self.form.plot_dialog.ok_button
QtCore.QTimer.singleShot(0, ok_button.clicked)
# close the child dialog
okWidget = self.form.ui.buttonBox.button(self.form.ui.buttonBox.Ok)
QTest.mouseClick(okWidget, Qt.LeftButton)
ok_(self.form.integrate_accepted)
eq_(self.form.min_volume, 0)
eq_(self.form.max_volume, 30)
ok_(self.form.plot_dialog.filenames)
ok_(self.form.plot_dialog.intensities)
def tearDown(self):
del self.form
if __name__ == "__main__":
unittest.main()
|
gpl-2.0
|
nmrao/robotframework
|
src/robot/testdoc.py
|
11
|
9803
|
#!/usr/bin/env python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for the `Testdoc` tool.
This module can be executed from the command line using the following
approaches::
python -m robot.testdoc
python path/to/robot/testdoc.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module also provides :func:`testdoc` and :func:`testdoc_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
USAGE = """robot.testdoc -- Robot Framework test data documentation tool
Version: <VERSION>
Usage: python -m robot.testdoc [options] data_sources output_file
Testdoc generates a high level test documentation based on Robot Framework
test data. Generated documentation includes name, documentation and other
metadata of each test suite and test case, as well as the top-level keywords
and their arguments.
Options
=======
-T --title title Set the title of the generated documentation.
Underscores in the title are converted to spaces.
The default title is the name of the top level suite.
-N --name name Override the name of the top level suite.
-D --doc document Override the documentation of the top level suite.
-M --metadata name:value * Set/override metadata of the top level suite.
-G --settag tag * Set given tag(s) to all test cases.
-t --test name * Include tests by name.
-s --suite name * Include suites by name.
-i --include tag * Include tests by tags.
-e --exclude tag * Exclude tests by tags.
-h -? --help Print this help.
All options except --title have exactly the same semantics as the same options
when executing test cases.
when executing test cases.
Execution
=========
Data can be given as a single file, directory, or as multiple files and
directories. In all these cases, the last argument must be the file where
to write the output. The output is always created in HTML format.
Testdoc works with all interpreters supported by Robot Framework (Python,
Jython and IronPython). It can be executed as an installed module like
`python -m robot.testdoc` or as a script like `python path/robot/testdoc.py`.
Examples:
python -m robot.testdoc my_test.html testdoc.html
jython -m robot.testdoc -N smoke_tests -i smoke path/to/my_tests smoke.html
ipy path/to/robot/testdoc.py first_suite.txt second_suite.txt output.html
For more information about Testdoc and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools.
"""
import os.path
import sys
import time
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RobotSettings
from robot.htmldata import HtmlFileWriter, ModelWriter, JsonWriter, TESTDOC
from robot.parsing import disable_curdir_processing
from robot.running import TestSuiteBuilder
from robot.utils import (abspath, Application, format_time, get_link_path,
html_escape, html_format, is_string,
secs_to_timestr, seq2str2, timestr_to_secs, unescape)
class TestDoc(Application):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(2,))
def main(self, datasources, title=None, **options):
outfile = abspath(datasources.pop())
suite = TestSuiteFactory(datasources, **options)
self._write_test_doc(suite, outfile, title)
self.console(outfile)
def _write_test_doc(self, suite, outfile, title):
with open(outfile, 'w') as output:
model_writer = TestdocModelWriter(output, suite, title)
HtmlFileWriter(output, model_writer).write(TESTDOC)
@disable_curdir_processing
def TestSuiteFactory(datasources, **options):
settings = RobotSettings(options)
if is_string(datasources):
datasources = [datasources]
suite = TestSuiteBuilder().build(*datasources)
suite.configure(**settings.suite_config)
return suite
class TestdocModelWriter(ModelWriter):
def __init__(self, output, suite, title=None):
self._output = output
self._output_path = getattr(output, 'name', None)
self._suite = suite
self._title = title.replace('_', ' ') if title else suite.name
def write(self, line):
self._output.write('<script type="text/javascript">\n')
self.write_data()
self._output.write('</script>\n')
def write_data(self):
generated_time = time.localtime()
model = {
'suite': JsonConverter(self._output_path).convert(self._suite),
'title': self._title,
'generated': format_time(generated_time, gmtsep=' '),
'generatedMillis': long(time.mktime(generated_time) * 1000)
}
JsonWriter(self._output).write_json('testdoc = ', model)
class JsonConverter(object):
def __init__(self, output_path=None):
self._output_path = output_path
def convert(self, suite):
return self._convert_suite(suite)
def _convert_suite(self, suite):
return {
'source': suite.source or '',
'relativeSource': self._get_relative_source(suite.source),
'id': suite.id,
'name': self._escape(suite.name),
'fullName': self._escape(suite.longname),
'doc': self._html(suite.doc),
'metadata': [(self._escape(name), self._html(value))
for name, value in suite.metadata.items()],
'numberOfTests': suite.test_count,
'suites': self._convert_suites(suite),
'tests': self._convert_tests(suite),
'keywords': list(self._convert_keywords(suite))
}
def _get_relative_source(self, source):
if not source or not self._output_path:
return ''
return get_link_path(source, os.path.dirname(self._output_path))
def _escape(self, item):
return html_escape(item)
def _html(self, item):
return html_format(unescape(item))
def _convert_suites(self, suite):
return [self._convert_suite(s) for s in suite.suites]
def _convert_tests(self, suite):
return [self._convert_test(t) for t in suite.tests]
def _convert_test(self, test):
return {
'name': self._escape(test.name),
'fullName': self._escape(test.longname),
'id': test.id,
'doc': self._html(test.doc),
'tags': [self._escape(t) for t in test.tags],
'timeout': self._get_timeout(test.timeout),
'keywords': list(self._convert_keywords(test))
}
def _convert_keywords(self, item):
for kw in getattr(item, 'keywords', []):
if kw.type == kw.SETUP_TYPE:
yield self._convert_keyword(kw, 'SETUP')
elif kw.type == kw.TEARDOWN_TYPE:
yield self._convert_keyword(kw, 'TEARDOWN')
elif kw.type == kw.FOR_LOOP_TYPE:
yield self._convert_for_loop(kw)
else:
yield self._convert_keyword(kw, 'KEYWORD')
def _convert_for_loop(self, kw):
return {
'name': self._escape(self._get_for_loop(kw)),
'arguments': '',
'type': 'FOR'
}
def _convert_keyword(self, kw, kw_type):
return {
'name': self._escape(self._get_kw_name(kw)),
'arguments': self._escape(', '.join(kw.args)),
'type': kw_type
}
def _get_kw_name(self, kw):
if kw.assign:
return '%s = %s' % (', '.join(a.rstrip('= ') for a in kw.assign), kw.name)
return kw.name
def _get_for_loop(self, kw):
joiner = ' %s ' % kw.flavor
return ', '.join(kw.variables) + joiner + seq2str2(kw.values)
def _get_timeout(self, timeout):
if timeout is None:
return ''
try:
tout = secs_to_timestr(timestr_to_secs(timeout.value))
except ValueError:
tout = timeout.value
if timeout.message:
tout += ' :: ' + timeout.message
return tout
def testdoc_cli(arguments):
"""Executes `Testdoc` similarly as from the command line.
:param arguments: command line arguments as a list of strings.
For programmatic usage the :func:`testdoc` function is typically better. It
has a better API for that and does not call :func:`sys.exit` like
this function.
Example::
from robot.testdoc import testdoc_cli
testdoc_cli(['--title', 'Test Plan', 'mytests', 'plan.html'])
"""
TestDoc().execute_cli(arguments)
def testdoc(*arguments, **options):
"""Executes `Testdoc` programmatically.
Arguments and options have the same semantics, and options have the same
names, as arguments and options to Testdoc.
Example::
from robot.testdoc import testdoc
testdoc('mytests', 'plan.html', title='Test Plan')
"""
TestDoc().execute(*arguments, **options)
if __name__ == '__main__':
testdoc_cli(sys.argv[1:])
|
apache-2.0
|
decause/pyglet-remy
|
tests/image/CHECKERBOARD.py
|
33
|
1163
|
#!/usr/bin/env python
'''Test that the checkerboard pattern looks correct.
One window will open, it should show one instance of the checkerboard
pattern in two levels of grey.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet.gl import *
from pyglet import image
from pyglet.window import *
from pyglet.window.event import *
from tests.regression import ImageRegressionTestCase
class TEST_CHECKERBOARD(ImageRegressionTestCase):
has_exit = False
def on_expose(self):
glClearColor(1, 1, 1, 1)
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
self.texture.blit(0, 0, 0)
self.window.flip()
if self.capture_regression_image():
self.has_exit = True
def test_main(self):
width, height = 200, 200
self.window = w = Window(width, height, visible=False)
w.push_handlers(self)
self.texture = image.create(32, 32, image.CheckerImagePattern()).texture
w.set_visible()
while not (w.has_exit or self.has_exit):
w.dispatch_events()
w.close()
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
GovReady/readthedocs.org
|
readthedocs/oauth/utils.py
|
4
|
7942
|
import logging
from allauth.socialaccount.models import SocialToken
from django.conf import settings
from requests_oauthlib import OAuth1Session, OAuth2Session
from .models import GithubProject, GithubOrganization, BitbucketProject, BitbucketTeam
from readthedocs.restapi.client import api
log = logging.getLogger(__name__)
def get_oauth_session(user, provider):
tokens = SocialToken.objects.filter(
account__user__username=user.username, app__provider=provider)
if tokens.exists():
token = tokens[0]
else:
return None
if provider == 'github':
session = OAuth2Session(
client_id=token.app.client_id,
token={
'access_token': str(token.token),
'token_type': 'bearer'
}
)
elif provider == 'bitbucket':
session = OAuth1Session(
token.app.client_id,
client_secret=token.app.secret,
resource_owner_key=token.token,
resource_owner_secret=token.token_secret
)
return session or None
def make_github_project(user, org, privacy, repo_json):
log.info('Trying GitHub: %s' % repo_json['full_name'])
if (repo_json['private'] is True and privacy == 'private' or
repo_json['private'] is False and privacy == 'public'):
project, created = GithubProject.objects.get_or_create(
full_name=repo_json['full_name'],
users__pk=user.pk,
)
if project.organization and project.organization != org:
log.debug('Not importing %s because mismatched orgs' % repo_json['name'])
return None
else:
project.organization = org
project.users.add(user)
project.name = repo_json['name']
project.description = repo_json['description']
project.git_url = repo_json['git_url']
project.ssh_url = repo_json['ssh_url']
project.html_url = repo_json['html_url']
project.json = repo_json
project.save()
return project
else:
log.debug('Not importing %s because mismatched type' % repo_json['name'])
def make_github_organization(user, org_json):
org, created = GithubOrganization.objects.get_or_create(
login=org_json.get('login'),
)
org.html_url = org_json.get('html_url')
org.name = org_json.get('name')
org.email = org_json.get('email')
org.json = org_json
org.users.add(user)
org.save()
return org
def get_token_for_project(project, force_local=False):
if not getattr(settings, 'ALLOW_PRIVATE_REPOS', False):
return None
token = None
try:
if getattr(settings, 'DONT_HIT_DB', True) and not force_local:
token = api.project(project.pk).token().get()['token']
else:
for user in project.users.all():
tokens = SocialToken.objects.filter(
account__user__username=user.username,
app__provider='github')
if tokens.exists():
token = tokens[0].token
except Exception:
log.error('Failed to get token for user', exc_info=True)
return token
def github_paginate(session, url):
"""
Scans through all GitHub paginated results and returns the concatenated
list of results.
:param session: requests client instance
:param url: start url to get the data from.
See https://developer.github.com/v3/#pagination
"""
result = []
while url:
r = session.get(url)
result.extend(r.json())
next = r.links.get('next')
if next:
url = next.get('url')
else:
url = None
return result
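# Illustrative sketch only, not part of the original module: github_paginate
# follows GitHub's Link headers, so a caller only supplies the first page URL
# and an authenticated session such as the one from get_oauth_session above.
def _example_github_paginate(session):
    # Hypothetical helper: list every repository of the authenticated user
    # across all result pages.
    repos = github_paginate(session, 'https://api.github.com/user/repos?per_page=100')
    return [repo['full_name'] for repo in repos]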
def import_github(user, sync):
""" Do the actual github import """
repo_type = getattr(settings, 'GITHUB_PRIVACY', 'public')
session = get_oauth_session(user, provider='github')
if sync and session:
# Get user repos
owner_resp = github_paginate(session, 'https://api.github.com/user/repos?per_page=100')
try:
for repo in owner_resp:
make_github_project(user=user, org=None, privacy=repo_type, repo_json=repo)
except TypeError, e:
print e
# Get org repos
try:
resp = session.get('https://api.github.com/user/orgs')
for org_json in resp.json():
org_resp = session.get('https://api.github.com/orgs/%s' % org_json['login'])
org_obj = make_github_organization(user=user, org_json=org_resp.json())
# Add repos
org_repos_resp = github_paginate(
session,
'https://api.github.com/orgs/%s/repos?per_page=100' % (
org_json['login']))
for repo in org_repos_resp:
make_github_project(user=user, org=org_obj, privacy=repo_type, repo_json=repo)
except TypeError, e:
print e
return session is not None
###
# Bitbucket
###
def bitbucket_paginate(session, url):
"""
Scans through all Bitbucket paginated results and returns the concatenated
list of results.
:param session: requests client instance
:param url: start url to get the data from.
"""
result = []
while url:
r = session.get(url)
result.extend([r.json()])
next_url = r.json().get('next')
if next_url:
url = next_url
else:
url = None
return result
def make_bitbucket_project(user, org, privacy, repo_json):
log.info('Trying Bitbucket: %s' % repo_json['full_name'])
if (repo_json['is_private'] is True and privacy == 'private' or
repo_json['is_private'] is False and privacy == 'public'):
project, created = BitbucketProject.objects.get_or_create(
full_name=repo_json['full_name'],
)
if project.organization and project.organization != org:
log.debug('Not importing %s because mismatched orgs' % repo_json['name'])
return None
else:
project.organization = org
project.users.add(user)
project.name = repo_json['name']
project.description = repo_json['description']
project.git_url = repo_json['links']['clone'][0]['href']
project.ssh_url = repo_json['links']['clone'][1]['href']
project.html_url = repo_json['links']['html']['href']
project.vcs = repo_json['scm']
project.json = repo_json
project.save()
return project
else:
log.debug('Not importing %s because mismatched type' % repo_json['name'])
def process_bitbucket_json(user, json, repo_type):
try:
for page in json:
for repo in page['values']:
make_bitbucket_project(user=user, org=None, privacy=repo_type, repo_json=repo)
except TypeError, e:
print e
def import_bitbucket(user, sync):
""" Do the actual github import """
repo_type = getattr(settings, 'GITHUB_PRIVACY', 'public')
session = get_oauth_session(user, provider='bitbucket')
if sync and session:
# Get user repos
try:
owner_resp = bitbucket_paginate(
session,
'https://bitbucket.org/api/2.0/repositories/{owner}'.format(
owner=user.username))
process_bitbucket_json(user, owner_resp, repo_type)
except TypeError, e:
print e
# Get org repos
resp = session.get('https://bitbucket.org/api/1.0/user/privileges/')
for team in resp.json()['teams'].keys():
org_resp = bitbucket_paginate(
session,
'https://bitbucket.org/api/2.0/teams/{team}/repositories'.format(
team=team))
process_bitbucket_json(user, org_resp, repo_type)
return session is not None
|
mit
|
amisrs/angular-flask
|
angular_flask/lib/python2.7/site-packages/requests/compat.py
|
289
|
2433
|
# -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import charade as chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
|
mit
|
udo-tech-team/shadowsocks-1
|
shadowsocks/shell.py
|
652
|
12736
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
info = sys.version_info
if info[0] == 2 and not info[1] >= 6:
print('Python 2.6+ required')
sys.exit(1)
elif info[0] == 3 and not info[1] >= 3:
print('Python 3.3+ required')
sys.exit(1)
elif info[0] not in [2, 3]:
print('Python version not supported')
sys.exit(1)
def print_exception(e):
global verbose
logging.error(e)
if verbose > 0:
import traceback
traceback.print_exc()
def print_shadowsocks():
version = ''
try:
import pkg_resources
version = pkg_resources.get_distribution('shadowsocks').version
except Exception:
pass
print('Shadowsocks %s' % version)
def find_config():
config_path = 'config.json'
if os.path.exists(config_path):
return config_path
config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json')
if os.path.exists(config_path):
return config_path
return None
def check_config(config, is_local):
if config.get('daemon', None) == 'stop':
# no need to specify configuration for daemon stop
return
if is_local and not config.get('password', None):
logging.error('password not specified')
print_help(is_local)
sys.exit(2)
if not is_local and not config.get('password', None) \
and not config.get('port_password', None) \
and not config.get('manager_address'):
logging.error('password or port_password not specified')
print_help(is_local)
sys.exit(2)
if 'local_port' in config:
config['local_port'] = int(config['local_port'])
if config.get('server_port', None) and type(config['server_port']) != list:
config['server_port'] = int(config['server_port'])
if config.get('local_address', '') in [b'0.0.0.0']:
logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
if config.get('server', '') in ['127.0.0.1', 'localhost']:
logging.warn('warning: server set to listen on %s:%s, are you sure?' %
(to_str(config['server']), config['server_port']))
if (config.get('method', '') or '').lower() == 'table':
logging.warn('warning: table is not safe; please use a safer cipher, '
'like AES-256-CFB')
if (config.get('method', '') or '').lower() == 'rc4':
logging.warn('warning: RC4 is not safe; please use a safer cipher, '
'like AES-256-CFB')
if config.get('timeout', 300) < 100:
logging.warn('warning: your timeout %d seems too short' %
int(config.get('timeout')))
if config.get('timeout', 300) > 600:
logging.warn('warning: your timeout %d seems too long' %
int(config.get('timeout')))
if config.get('password') in [b'mypassword']:
logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
'config.json!')
sys.exit(1)
if config.get('user', None) is not None:
if os.name != 'posix':
logging.error('user can be used only on Unix')
sys.exit(1)
encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
global verbose
logging.basicConfig(level=logging.INFO,
format='%(levelname)-s: %(message)s')
if is_local:
shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
'version']
else:
shortopts = 'hd:s:p:k:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
'forbidden-ip=', 'user=', 'manager-address=', 'version']
try:
config_path = find_config()
optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
for key, value in optlist:
if key == '-c':
config_path = value
if config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = parse_json_in_str(f.read().decode('utf8'))
except ValueError as e:
logging.error('found an error in config.json: %s',
e.message)
sys.exit(1)
else:
config = {}
v_count = 0
for key, value in optlist:
if key == '-p':
config['server_port'] = int(value)
elif key == '-k':
config['password'] = to_bytes(value)
elif key == '-l':
config['local_port'] = int(value)
elif key == '-s':
config['server'] = to_str(value)
elif key == '-m':
config['method'] = to_str(value)
elif key == '-b':
config['local_address'] = to_str(value)
elif key == '-v':
v_count += 1
# '-vv' turns on more verbose mode
config['verbose'] = v_count
elif key == '-t':
config['timeout'] = int(value)
elif key == '--fast-open':
config['fast_open'] = True
elif key == '--workers':
config['workers'] = int(value)
elif key == '--manager-address':
config['manager_address'] = value
elif key == '--user':
config['user'] = to_str(value)
elif key == '--forbidden-ip':
config['forbidden_ip'] = to_str(value).split(',')
elif key in ('-h', '--help'):
if is_local:
print_local_help()
else:
print_server_help()
sys.exit(0)
elif key == '--version':
print_shadowsocks()
sys.exit(0)
elif key == '-d':
config['daemon'] = to_str(value)
elif key == '--pid-file':
config['pid-file'] = to_str(value)
elif key == '--log-file':
config['log-file'] = to_str(value)
elif key == '-q':
v_count -= 1
config['verbose'] = v_count
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print_help(is_local)
sys.exit(2)
if not config:
logging.error('config not specified')
print_help(is_local)
sys.exit(2)
config['password'] = to_bytes(config.get('password', b''))
config['method'] = to_str(config.get('method', 'aes-256-cfb'))
config['port_password'] = config.get('port_password', None)
config['timeout'] = int(config.get('timeout', 300))
config['fast_open'] = config.get('fast_open', False)
config['workers'] = config.get('workers', 1)
config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
config['verbose'] = config.get('verbose', False)
config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
config['local_port'] = config.get('local_port', 1080)
if is_local:
if config.get('server', None) is None:
logging.error('server addr not specified')
print_local_help()
sys.exit(2)
else:
config['server'] = to_str(config['server'])
else:
config['server'] = to_str(config.get('server', '0.0.0.0'))
try:
config['forbidden_ip'] = \
IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
except Exception as e:
logging.error(e)
sys.exit(2)
config['server_port'] = config.get('server_port', None)
logging.getLogger('').handlers = []
logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
if config['verbose'] >= 2:
level = VERBOSE_LEVEL
elif config['verbose'] == 1:
level = logging.DEBUG
elif config['verbose'] == -1:
level = logging.WARN
elif config['verbose'] <= -2:
level = logging.ERROR
else:
level = logging.INFO
verbose = config['verbose']
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
check_config(config, is_local)
return config
def print_help(is_local):
if is_local:
print_local_help()
else:
print_server_help()
def print_local_help():
print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address
-p SERVER_PORT server port, default: 8388
-b LOCAL_ADDR local binding address, default: 127.0.0.1
-l LOCAL_PORT local port, default: 1080
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address, default: 0.0.0.0
-p SERVER_PORT server port, default: 8388
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
--workers WORKERS number of workers, available on Unix/Linux
--forbidden-ip IPLIST comma separated IP list forbidden to connect
--manager-address ADDR optional server manager UDP address, see wiki
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
# parse json and convert everything from unicode to str
return json.loads(data, object_hook=_decode_dict)
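# Illustrative sketch only, not part of the original module: parse_json_in_str
# parses a JSON document and, through _decode_dict/_decode_list, converts every
# decoded string to a byte string so the rest of the module can work with bytes.
def _example_parse_json_in_str():
    # Hypothetical configuration snippet used purely for demonstration.
    return parse_json_in_str('{"server": "0.0.0.0", "server_port": 8388}')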
|
apache-2.0
|
mohamed-ali/scikit-tensor
|
sktensor/pyutils.py
|
4
|
1611
|
def inherit_docstring_from(cls):
def docstring_inheriting_decorator(fn):
fn.__doc__ = getattr(cls, fn.__name__).__doc__
return fn
return docstring_inheriting_decorator
def is_sequence(obj):
"""
Helper function to determine sequences
across Python 2.x and 3.x
"""
try:
from collections import Sequence
except ImportError:
from operator import isSequenceType
return isSequenceType(obj)
else:
return isinstance(obj, Sequence)
def is_number(obj):
"""
Helper function to determine numbers
across Python 2.x and 3.x
"""
try:
from numbers import Number
except ImportError:
from operator import isNumberType
return isNumberType(obj)
else:
return isinstance(obj, Number)
def func_attr(f, attr):
"""
Helper function to get the attribute of a function
like, name, code, defaults across Python 2.x and 3.x
"""
if hasattr(f, 'func_%s' % attr):
return getattr(f, 'func_%s' % attr)
elif hasattr(f, '__%s__' % attr):
return getattr(f, '__%s__' % attr)
else:
raise ValueError('Object %s has no attr %s' % (str(f), attr))
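# Illustrative sketch only, not in the original module: func_attr abstracts over
# the Python 2 'func_*' attributes and the Python 3 dunder names.
def _example_func_attr():
    def f(a, b=1):
        return a + b
    # Hypothetical demonstration: both lookups resolve on Python 2 and 3.
    return func_attr(f, 'name'), func_attr(f, 'defaults')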
def from_to_without(frm, to, without, step=1, skip=1, reverse=False, separate=False):
"""
Helper function to create ranges with missing entries
"""
if reverse:
frm, to = (to - 1), (frm - 1)
step *= -1
skip *= -1
a = list(range(frm, without, step))
b = list(range(without + skip, to, step))
if separate:
return a, b
else:
return a + b
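# Illustrative sketch only, not in the original module: from_to_without builds a
# range with one entry removed, e.g. a list of mode indices excluding one mode.
def _example_from_to_without():
    # Hypothetical demonstration: 0..9 without 5, joined and split.
    assert from_to_without(0, 10, 5) == [0, 1, 2, 3, 4, 6, 7, 8, 9]
    assert from_to_without(0, 10, 5, separate=True) == ([0, 1, 2, 3, 4], [6, 7, 8, 9])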
|
gpl-3.0
|
davidharrigan/django
|
tests/m2m_multiple/tests.py
|
227
|
2370
|
from __future__ import unicode_literals
from datetime import datetime
from django.test import TestCase
from .models import Article, Category
class M2MMultipleTests(TestCase):
def test_multiple(self):
c1, c2, c3, c4 = [
Category.objects.create(name=name)
for name in ["Sports", "News", "Crime", "Life"]
]
a1 = Article.objects.create(
headline="Parrot steals", pub_date=datetime(2005, 11, 27)
)
a1.primary_categories.add(c2, c3)
a1.secondary_categories.add(c4)
a2 = Article.objects.create(
headline="Parrot runs", pub_date=datetime(2005, 11, 28)
)
a2.primary_categories.add(c1, c2)
a2.secondary_categories.add(c4)
self.assertQuerysetEqual(
a1.primary_categories.all(), [
"Crime",
"News",
],
lambda c: c.name
)
self.assertQuerysetEqual(
a2.primary_categories.all(), [
"News",
"Sports",
],
lambda c: c.name
)
self.assertQuerysetEqual(
a1.secondary_categories.all(), [
"Life",
],
lambda c: c.name
)
self.assertQuerysetEqual(
c1.primary_article_set.all(), [
"Parrot runs",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c1.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c2.primary_article_set.all(), [
"Parrot steals",
"Parrot runs",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c2.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c3.primary_article_set.all(), [
"Parrot steals",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c3.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c4.primary_article_set.all(), []
)
self.assertQuerysetEqual(
c4.secondary_article_set.all(), [
"Parrot steals",
"Parrot runs",
],
lambda a: a.headline
)
|
bsd-3-clause
|
harmy/kbengine
|
kbe/src/lib/python/Lib/test/test_pow.py
|
177
|
4593
|
import test.support, unittest
class PowTest(unittest.TestCase):
def powtest(self, type):
if type != float:
for i in range(-1000, 1000):
self.assertEqual(pow(type(i), 0), 1)
self.assertEqual(pow(type(i), 1), type(i))
self.assertEqual(pow(type(0), 1), type(0))
self.assertEqual(pow(type(1), 1), type(1))
for i in range(-100, 100):
self.assertEqual(pow(type(i), 3), i*i*i)
pow2 = 1
for i in range(0, 31):
self.assertEqual(pow(2, i), pow2)
if i != 30 : pow2 = pow2*2
for othertype in (int,):
for i in list(range(-10, 0)) + list(range(1, 10)):
ii = type(i)
for j in range(1, 11):
jj = -othertype(j)
pow(ii, jj)
for othertype in int, float:
for i in range(1, 100):
zero = type(0)
exp = -othertype(i/10.0)
if exp == 0:
continue
self.assertRaises(ZeroDivisionError, pow, zero, exp)
il, ih = -20, 20
jl, jh = -5, 5
kl, kh = -10, 10
asseq = self.assertEqual
if type == float:
il = 1
asseq = self.assertAlmostEqual
elif type == int:
jl = 0
elif type == int:
jl, jh = 0, 15
for i in range(il, ih+1):
for j in range(jl, jh+1):
for k in range(kl, kh+1):
if k != 0:
if type == float or j < 0:
self.assertRaises(TypeError, pow, type(i), j, k)
continue
asseq(
pow(type(i),j,k),
pow(type(i),j)% type(k)
)
def test_powint(self):
self.powtest(int)
def test_powlong(self):
self.powtest(int)
def test_powfloat(self):
self.powtest(float)
def test_other(self):
# Other tests-- not very systematic
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
self.assertEqual(pow(3,3) % 8, pow(3,3,8))
self.assertEqual(pow(3,3) % -8, pow(3,3,-8))
self.assertEqual(pow(3,2) % -2, pow(3,2,-2))
self.assertEqual(pow(-3,3) % 8, pow(-3,3,8))
self.assertEqual(pow(-3,3) % -8, pow(-3,3,-8))
self.assertEqual(pow(5,2) % -8, pow(5,2,-8))
for i in range(-10, 11):
for j in range(0, 6):
for k in range(-7, 11):
if j >= 0 and k != 0:
self.assertEqual(
pow(i,j) % k,
pow(i,j,k)
)
if j >= 0 and k != 0:
self.assertEqual(
pow(int(i),j) % k,
pow(int(i),j,k)
)
def test_bug643260(self):
class TestRpow:
def __rpow__(self, other):
return None
None ** TestRpow() # Won't fail when __rpow__ invoked. SF bug #643260.
def test_bug705231(self):
# -1.0 raised to an integer should never blow up. It did if the
# platform pow() was buggy, and Python didn't work around it.
eq = self.assertEqual
a = -1.0
# The next two tests can still fail if the platform floor()
# function doesn't treat all large inputs as integers
# test_math should also fail if that is happening
eq(pow(a, 1.23e167), 1.0)
eq(pow(a, -1.23e167), 1.0)
for b in range(-10, 11):
eq(pow(a, float(b)), b & 1 and -1.0 or 1.0)
for n in range(0, 100):
fiveto = float(5 ** n)
# For small n, fiveto will be odd. Eventually we run out of
# mantissa bits, though, and thereafter fiveto will be even.
expected = fiveto % 2.0 and -1.0 or 1.0
eq(pow(a, fiveto), expected)
eq(pow(a, -fiveto), expected)
eq(expected, 1.0) # else we didn't push fiveto to evenness
def test_main():
test.support.run_unittest(PowTest)
if __name__ == "__main__":
test_main()
|
lgpl-3.0
|
mellis13/moose
|
framework/contrib/nsiqcppstyle/rules/RULE_6_5_B_do_not_use_macro_for_constants.py
|
43
|
2094
|
"""
Do not use macros for constants.
If a constant is defined by a macro, this rule reports a violation.
Instead, use enum or const variables.
However, it's OK to write a macro function.
If the macro name starts with an underscore,
the macro is regarded as defined for a special purpose
and no violation is reported for it.
== Violation ==
#define KK 1 <== Violation
#define TT "sds" <== Violation
== Good ==
#define KK(A) (A)*3 <== Don't care. It's macro function
const int k = 3; <== OK
const char *t = "EWEE"; <== OK
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, contextStack) :
t = lexer.GetCurToken()
if t.type == "PREPROCESSOR" and t.value.find("define") != -1 :
d = lexer.GetNextTokenSkipWhiteSpaceAndComment()
k2 = lexer.GetNextTokenSkipWhiteSpaceAndComment()
if d.type == "ID" and k2 != None and k2.type in ["NUMBER", "STRING", "CHARACTOR"] and d.lineno == k2.lineno :
if not Search("^_", d.value) :
nsiqcppstyle_reporter.Error(d, __name__,
"Do not use macro(%s) for constant" % d.value)
ruleManager.AddPreprocessRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddPreprocessRule(RunRule)
def test1(self):
self.Analyze("thisfile.c","""
#define k 1
""")
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("thisfile.c","""
#define tt(A) 3
""")
assert not CheckErrorContent(__name__)
def test3(self):
self.Analyze("thisfile.c","""
# define t "ewew"
""")
assert CheckErrorContent(__name__)
def test4(self):
self.Analyze("thisfile.c","""
# define _t "ewew"
""")
assert not CheckErrorContent(__name__)
|
lgpl-2.1
|
ankitrgadiya/cs50
|
project/miki/miki/__init__.py
|
1
|
1049
|
import os
from flask import Flask, redirect
def create_app(test_config=None):
"""
Application Factory function
"""
# Create app instance and configure it
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'miki.db'),
SOURCE=os.path.join(app.instance_path, 'source/'),
OUTPUT=os.path.join(app.instance_path, 'output/')
)
# Load the instance config unless a test config was passed in
if test_config is None:
app.config.from_pyfile('config.py', silent=True)
else:
app.config.update(test_config)
# Make sure directories exist
try:
os.makedirs(app.config['SOURCE'])
os.makedirs(app.config['OUTPUT'])
except OSError:
pass
# Register database commands
from miki import db
db.register(app)
# Register blueprints
from miki import auth, edit
app.register_blueprint(auth.bp)
app.register_blueprint(edit.bp)
# Return app instance
return app
|
gpl-3.0
|
MobinRanjbar/hue
|
desktop/core/ext-py/jdcal-1.0/jdcal.py
|
19
|
14903
|
# -*- coding:utf-8 -*-
"""Functions for converting between Julian dates and calendar dates.
A function for converting Gregorian calendar dates to Julian dates, and
another function for converting Julian calendar dates to Julian dates
are defined. Two functions for the reverse calculations are also
defined.
Different regions of the world switched to Gregorian calendar from
Julian calendar on different dates. Having separate functions for Julian
and Gregorian calendars allows maximum flexibility in choosing the
relevant calendar.
All the above functions are "proleptic". This means that they work for
dates on which the concerned calendar is not valid. For example,
Gregorian calendar was not used prior to around October 1582.
Julian dates are stored in two floating point numbers (double). Julian
dates, and Modified Julian dates, are large numbers. If only one number
is used, then the precision of the time stored is limited. Using two
numbers, time can be split in a manner that will allow maximum
precision. For example, the first number could be the Julian date for
the beginning of a day and the second number could be the fractional
day. Calculations that need the latter part can now work with maximum
precision.
A function to test if a given Gregorian calendar year is a leap year is
defined.
Zero point of Modified Julian Date (MJD) and the MJD of 2000/1/1
12:00:00 are also given.
This module is based on the TPM C library, by Jeffery W. Percival. The
idea for splitting Julian date into two floating point numbers was
inspired by the IAU SOFA C library.
:author: Prasanth Nair
:contact: [email protected]
:license: BSD (http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import division
from __future__ import print_function
import math
__version__ = "1.0"
MJD_0 = 2400000.5
MJD_JD2000 = 51544.5
def fpart(x):
"""Return fractional part of given number."""
return math.modf(x)[0]
def ipart(x):
"""Return integer part of given number."""
return math.modf(x)[1]
def is_leap(year):
"""Leap year or not in the Gregorian calendar."""
x = math.fmod(year, 4)
y = math.fmod(year, 100)
z = math.fmod(year, 400)
# Divisible by 4 and,
# either not divisible by 100 or divisible by 400.
return not x and (y or not z)
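# Illustrative sketch only, not part of the original module: century years are
# leap years only when they are divisible by 400.
def _example_is_leap():
    # Hypothetical check; expected result is [True, False, True, False].
    return [is_leap(y) for y in (2000, 1900, 1996, 2001)]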
def gcal2jd(year, month, day):
"""Gregorian calendar date to Julian date.
The input and output are for the proleptic Gregorian calendar,
i.e., no consideration of historical usage of the calendar is
made.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd1, jd2: 2-element tuple of floats
When added together, the numbers give the Julian date for the
given Gregorian calendar date. The first number is always
MJD_0, i.e., 2400000.5. So the second is the MJD.
Examples
--------
>>> gcal2jd(2000,1,1)
(2400000.5, 51544.0)
>>> 2400000.5 + 51544.0 + 0.5
2451545.0
>>> year = [-4699, -2114, -1050, -123, -1, 0, 1, 123, 1678.0, 2000,
....: 2012, 2245]
>>> month = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
>>> day = [1, 12, 23, 14, 25, 16, 27, 8, 9, 10, 11, 31]
>>> x = [gcal2jd(y, m, d) for y, m, d in zip(year, month, day)]
>>> for i in x: print i
(2400000.5, -2395215.0)
(2400000.5, -1451021.0)
(2400000.5, -1062364.0)
(2400000.5, -723762.0)
(2400000.5, -679162.0)
(2400000.5, -678774.0)
(2400000.5, -678368.0)
(2400000.5, -633797.0)
(2400000.5, -65812.0)
(2400000.5, 51827.0)
(2400000.5, 56242.0)
(2400000.5, 141393.0)
Negative months and days are valid. For example, 2000/-2/-4 =>
1999/+12-2/-4 => 1999/10/-4 => 1999/9/30-4 => 1999/9/26.
>>> gcal2jd(2000, -2, -4)
(2400000.5, 51447.0)
>>> gcal2jd(1999, 9, 26)
(2400000.5, 51447.0)
>>> gcal2jd(2000, 2, -1)
(2400000.5, 51573.0)
>>> gcal2jd(2000, 1, 30)
(2400000.5, 51573.0)
>>> gcal2jd(2000, 3, -1)
(2400000.5, 51602.0)
>>> gcal2jd(2000, 2, 28)
(2400000.5, 51602.0)
Month 0 becomes previous month.
>>> gcal2jd(2000, 0, 1)
(2400000.5, 51513.0)
>>> gcal2jd(1999, 12, 1)
(2400000.5, 51513.0)
Day number 0 becomes last day of previous month.
>>> gcal2jd(2000, 3, 0)
(2400000.5, 51603.0)
>>> gcal2jd(2000, 2, 29)
(2400000.5, 51603.0)
If `day` is greater than the number of days in `month`, then it
gets carried over to the next month.
>>> gcal2jd(2000,2,30)
(2400000.5, 51604.0)
>>> gcal2jd(2000,3,1)
(2400000.5, 51604.0)
>>> gcal2jd(2001,2,30)
(2400000.5, 51970.0)
>>> gcal2jd(2001,3,2)
(2400000.5, 51970.0)
Notes
-----
The returned Julian date is for mid-night of the given date. To
find the Julian date for any time of the day, simply add time as a
fraction of a day. For example Julian date for mid-day can be
obtained by adding 0.5 to either the first part or the second
part. The latter is preferable, since it will give the MJD for the
date and time.
BC dates should be given as -(BC - 1) where BC is the year. For
example 1 BC == 0, 2 BC == -1, and so on.
Negative numbers can be used for `month` and `day`. For example
2000, -1, 1 is the same as 1999, 11, 1.
The Julian dates are proleptic Julian dates, i.e., values are
returned without considering if Gregorian dates are valid for the
given date.
The input values are truncated to integers.
"""
year = int(year)
month = int(month)
day = int(day)
a = ipart((month - 14) / 12.0)
jd = ipart((1461 * (year + 4800 + a)) / 4.0)
jd += ipart((367 * (month - 2 - 12 * a)) / 12.0)
x = ipart((year + 4900 + a) / 100.0)
jd -= ipart((3 * x) / 4.0)
jd += day - 2432075.5 # was 32075; add 2400000.5
jd -= 0.5 # 0 hours; above JD is for midday, switch to midnight.
return MJD_0, jd
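# Illustrative sketch only, not part of the original module: per the Notes in
# the docstring above, adding a fraction of a day to the second return value
# gives the MJD for that time of day.
def _example_gcal2jd_midday():
    # Hypothetical check: mid-day of 2000-01-01 is MJD 51544.5 (MJD_JD2000).
    jd1, jd2 = gcal2jd(2000, 1, 1)
    return jd1, jd2 + 0.5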
def jd2gcal(jd1, jd2):
"""Julian date to Gregorian calendar date and time of day.
The input and output are for the proleptic Gregorian calendar,
i.e., no consideration of historical usage of the calendar is
made.
Parameters
----------
jd1, jd2: int
Sum of the two numbers is taken as the given Julian date. For
example `jd1` can be the zero point of MJD (MJD_0) and `jd2`
can be the MJD of the date and time. But any combination will
work.
Returns
-------
y, m, d, f : int, int, int, float
Four element tuple containing year, month, day and the
fractional part of the day in the Gregorian calendar. The first
three are integers, and the last part is a float.
Examples
--------
>>> jd2gcal(*gcal2jd(2000,1,1))
(2000, 1, 1, 0.0)
>>> jd2gcal(*gcal2jd(1950,1,1))
(1950, 1, 1, 0.0)
Out of range months and days are carried over to the next/previous
year or next/previous month. See gcal2jd for more examples.
>>> jd2gcal(*gcal2jd(1999,10,12))
(1999, 10, 12, 0.0)
>>> jd2gcal(*gcal2jd(2000,2,30))
(2000, 3, 1, 0.0)
>>> jd2gcal(*gcal2jd(-1999,10,12))
(-1999, 10, 12, 0.0)
>>> jd2gcal(*gcal2jd(2000, -2, -4))
(1999, 9, 26, 0.0)
>>> gcal2jd(2000,1,1)
(2400000.5, 51544.0)
>>> jd2gcal(2400000.5, 51544.0)
(2000, 1, 1, 0.0)
>>> jd2gcal(2400000.5, 51544.5)
(2000, 1, 1, 0.5)
>>> jd2gcal(2400000.5, 51544.245)
(2000, 1, 1, 0.24500000000261934)
>>> jd2gcal(2400000.5, 51544.1)
(2000, 1, 1, 0.099999999998544808)
>>> jd2gcal(2400000.5, 51544.75)
(2000, 1, 1, 0.75)
Notes
-----
The last element of the tuple is the same as
(hh + mm / 60.0 + ss / 3600.0) / 24.0
where hh, mm, and ss are the hour, minute and second of the day.
See Also
--------
gcal2jd
"""
from math import modf
jd1_f, jd1_i = modf(jd1)
jd2_f, jd2_i = modf(jd2)
jd_i = jd1_i + jd2_i
f = jd1_f + jd2_f
# Set JD to noon of the current date. Fractional part is the
# fraction from midnight of the current date.
if -0.5 < f < 0.5:
f += 0.5
elif f >= 0.5:
jd_i += 1
f -= 0.5
elif f <= -0.5:
jd_i -= 1
f += 1.5
l = jd_i + 68569
n = ipart((4 * l) / 146097.0)
l -= ipart(((146097 * n) + 3) / 4.0)
i = ipart((4000 * (l + 1)) / 1461001)
l -= ipart((1461 * i) / 4.0) - 31
j = ipart((80 * l) / 2447.0)
day = l - ipart((2447 * j) / 80.0)
l = ipart(j / 11.0)
month = j + 2 - (12 * l)
year = 100 * (n - 49) + i + l
return int(year), int(month), int(day), f
def jcal2jd(year, month, day):
"""Julian calendar date to Julian date.
The input and output are for the proleptic Julian calendar,
i.e., no consideration of historical usage of the calendar is
made.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd1, jd2: 2-element tuple of floats
When added together, the numbers give the Julian date for the
given Julian calendar date. The first number is always
MJD_0, i.e., 2400000.5. So the second is the MJD.
Examples
--------
>>> jcal2jd(2000, 1, 1)
(2400000.5, 51557.0)
>>> year = [-4699, -2114, -1050, -123, -1, 0, 1, 123, 1678, 2000,
...: 2012, 2245]
>>> month = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12]
>>> day = [1, 12, 23, 14, 25, 16, 27, 8, 9, 10, 11, 31]
>>> x = [jcal2jd(y, m, d) for y, m, d in zip(year, month, day)]
>>> for i in x: print i
(2400000.5, -2395252.0)
(2400000.5, -1451039.0)
(2400000.5, -1062374.0)
(2400000.5, -723765.0)
(2400000.5, -679164.0)
(2400000.5, -678776.0)
(2400000.5, -678370.0)
(2400000.5, -633798.0)
(2400000.5, -65772.0)
(2400000.5, 51871.0)
(2400000.5, 56285.0)
Notes
-----
Unlike `gcal2jd`, negative months and days can result in incorrect
Julian dates.
"""
year = int(year)
month = int(month)
day = int(day)
jd = 367 * year
x = ipart((month - 9) / 7.0)
jd -= ipart((7 * (year + 5001 + x)) / 4.0)
jd += ipart((275 * month) / 9.0)
jd += day
jd += 1729777 - 2400000.5 # Return 2400000.5 as first part of JD.
jd -= 0.5 # Convert midday to midnight.
return MJD_0, jd
def jd2jcal(jd1, jd2):
"""Julian calendar date for the given Julian date.
The input and output are for the proleptic Julian calendar,
i.e., no consideration of historical usage of the calendar is
made.
Parameters
----------
jd1, jd2: int
Sum of the two numbers is taken as the given Julian date. For
example `jd1` can be the zero point of MJD (MJD_0) and `jd2`
can be the MJD of the date and time. But any combination will
work.
Returns
-------
y, m, d, f : int, int, int, float
Four element tuple containing year, month, day and the
fractional part of the day in the Julian calendar. The first
three are integers, and the last part is a float.
Examples
--------
>>> jd2jcal(*jcal2jd(2000, 1, 1))
(2000, 1, 1, 0.0)
>>> jd2jcal(*jcal2jd(-4000, 10, 11))
(-4000, 10, 11, 0.0)
>>> jcal2jd(2000, 1, 1)
(2400000.5, 51557.0)
>>> jd2jcal(2400000.5, 51557.0)
(2000, 1, 1, 0.0)
>>> jd2jcal(2400000.5, 51557.5)
(2000, 1, 1, 0.5)
>>> jd2jcal(2400000.5, 51557.245)
(2000, 1, 1, 0.24500000000261934)
>>> jd2jcal(2400000.5, 51557.1)
(2000, 1, 1, 0.099999999998544808)
>>> jd2jcal(2400000.5, 51557.75)
(2000, 1, 1, 0.75)
"""
from math import modf
jd1_f, jd1_i = modf(jd1)
jd2_f, jd2_i = modf(jd2)
jd_i = jd1_i + jd2_i
f = jd1_f + jd2_f
# Set JD to noon of the current date. Fractional part is the
# fraction from midnight of the current date.
if -0.5 < f < 0.5:
f += 0.5
elif f >= 0.5:
jd_i += 1
f -= 0.5
elif f <= -0.5:
jd_i -= 1
f += 1.5
j = jd_i + 1402.0
k = ipart((j - 1) / 1461.0)
l = j - (1461.0 * k)
n = ipart((l - 1) / 365.0) - ipart(l / 1461.0)
i = l - (365.0 * n) + 30.0
j = ipart((80.0 * i) / 2447.0)
day = i - ipart((2447.0 * j) / 80.0)
i = ipart(j / 11.0)
month = j + 2 - (12.0 * i)
year = (4 * k) + n + i - 4716.0
return int(year), int(month), int(day), f
# Some tests.
def _test_gcal2jd_with_sla_cldj():
"""Compare gcal2jd with slalib.sla_cldj."""
import random
try:
from pyslalib import slalib
except ImportError:
print("SLALIB (PySLALIB not available).")
return 1
n = 1000
mday = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# sla_cldj needs year > -4699 i.e., 4700 BC.
year = [random.randint(-4699, 2200) for i in range(n)]
month = [random.randint(1, 12) for i in range(n)]
day = [random.randint(1, 31) for i in range(n)]
for i in range(n):
x = 0
if is_leap(year[i]) and month[i] == 2:
x = 1
if day[i] > mday[month[i]] + x:
day[i] = mday[month[i]]
jd_jdc = [gcal2jd(y, m, d)[1]
for y, m, d in zip(year, month, day)]
jd_sla = [slalib.sla_cldj(y, m, d)[0]
for y, m, d in zip(year, month, day)]
diff = [abs(i - j) for i, j in zip(jd_sla, jd_jdc)]
assert max(diff) <= 1e-8
assert min(diff) <= 1e-8
def _test_jd2gcal():
"""Check jd2gcal as reverse of gcal2jd."""
import random
n = 1000
mday = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
year = [random.randint(-4699, 2200) for i in range(n)]
month = [random.randint(1, 12) for i in range(n)]
day = [random.randint(1, 31) for i in range(n)]
for i in range(n):
x = 0
if is_leap(year[i]) and month[i] == 2:
x = 1
if day[i] > mday[month[i]] + x:
day[i] = mday[month[i]]
jd = [gcal2jd(y, m, d)[1]
for y, m, d in zip(year, month, day)]
x = [jd2gcal(MJD_0, i) for i in jd]
for i in range(n):
assert x[i][0] == year[i]
assert x[i][1] == month[i]
assert x[i][2] == day[i]
assert x[i][3] <= 1e-15
def _test_jd2jcal():
"""Check jd2jcal as reverse of jcal2jd."""
import random
n = 1000
year = [random.randint(-4699, 2200) for i in range(n)]
month = [random.randint(1, 12) for i in range(n)]
day = [random.randint(1, 28) for i in range(n)]
jd = [jcal2jd(y, m, d)[1]
for y, m, d in zip(year, month, day)]
x = [jd2gcal(MJD_0, i) for i in jd]
for i in range(n):
assert x[i][0] == year[i]
assert x[i][1] == month[i]
assert x[i][2] == day[i]
assert x[i][3] <= 1e-15
|
apache-2.0
|
n0m4dz/odoo
|
addons/project/wizard/project_task_delegate.py
|
142
|
6479
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from openerp import tools
from openerp.tools.translate import _
from openerp.osv import fields, osv
class project_task_delegate(osv.osv_memory):
_name = 'project.task.delegate'
_description = 'Task Delegate'
_columns = {
'name': fields.char('Delegated Title', required=True, help="New title of the task delegated to the user"),
'prefix': fields.char('Your Task Title', help="Title for your validation task"),
'project_id': fields.many2one('project.project', 'Project', help="User you want to delegate this task to"),
'user_id': fields.many2one('res.users', 'Assign To', required=True, help="User you want to delegate this task to"),
'new_task_description': fields.text('New Task Description', help="Reinclude the description of the task in the task of the user"),
'planned_hours': fields.float('Planned Hours', help="Estimated time to close this task by the delegated user"),
'planned_hours_me': fields.float('Hours to Validate', help="Estimated time for you to validate the work done by the user to whom you delegate this task"),
'state': fields.selection([('pending','Pending'), ('done','Done'), ], 'Validation State', help="New state of your own task. Pending will be reopened automatically when the delegated task is closed")
}
def onchange_project_id(self, cr, uid, ids, project_id=False, context=None):
project_project = self.pool.get('project.project')
if not project_id:
return {'value':{'user_id': False}}
project = project_project.browse(cr, uid, project_id, context=context)
return {'value': {'user_id': project.user_id and project.user_id.id or False}}
def default_get(self, cr, uid, fields, context=None):
"""
This function gets default values
"""
res = super(project_task_delegate, self).default_get(cr, uid, fields, context=context)
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
if not record_id:
return res
task_pool = self.pool.get('project.task')
task = task_pool.browse(cr, uid, record_id, context=context)
task_name =tools.ustr(task.name)
if 'project_id' in fields:
res['project_id'] = int(task.project_id.id) if task.project_id else False
if 'name' in fields:
if task_name.startswith(_('CHECK: ')):
newname = tools.ustr(task_name).replace(_('CHECK: '), '')
else:
newname = tools.ustr(task_name or '')
res['name'] = newname
if 'planned_hours' in fields:
res['planned_hours'] = task.remaining_hours or 0.0
if 'prefix' in fields:
if task_name.startswith(_('CHECK: ')):
newname = tools.ustr(task_name).replace(_('CHECK: '), '')
else:
newname = tools.ustr(task_name or '')
prefix = _('CHECK: %s') % newname
res['prefix'] = prefix
if 'new_task_description' in fields:
res['new_task_description'] = task.description
return res
_defaults = {
'planned_hours_me': 1.0,
'state': 'pending',
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(project_task_delegate, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
users_pool = self.pool.get('res.users')
obj_tm = users_pool.browse(cr, uid, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
if tm in ['Hours','Hour']:
return res
eview = etree.fromstring(res['arch'])
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
for field in res['fields']:
if 'Hours' in res['fields'][field]['string']:
res['fields'][field]['string'] = res['fields'][field]['string'].replace('Hours',tm)
return res
def delegate(self, cr, uid, ids, context=None):
if context is None:
context = {}
task_id = context.get('active_id', False)
task_pool = self.pool.get('project.task')
delegate_data = self.read(cr, uid, ids, context=context)[0]
delegated_tasks = task_pool.do_delegate(cr, uid, [task_id], delegate_data, context=context)
models_data = self.pool.get('ir.model.data')
action_model, action_id = models_data.get_object_reference(cr, uid, 'project', 'action_view_task')
view_model, task_view_form_id = models_data.get_object_reference(cr, uid, 'project', 'view_task_form2')
view_model, task_view_tree_id = models_data.get_object_reference(cr, uid, 'project', 'view_task_tree2')
action = self.pool[action_model].read(cr, uid, [action_id], context=context)[0]
action['res_id'] = delegated_tasks[task_id]
action['view_id'] = False
action['views'] = [(task_view_form_id, 'form'), (task_view_tree_id, 'tree')]
action['help'] = False
return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Mitali-Sodhi/CodeLingo
|
Dataset/python/test_networks.py
|
7
|
3000
|
from novaclient.v1_1 import networks
from novaclient.tests import utils
from novaclient.tests.v1_1 import fakes
cs = fakes.FakeClient()
class NetworksTest(utils.TestCase):
def test_list_networks(self):
fl = cs.networks.list()
cs.assert_called('GET', '/os-networks')
[self.assertTrue(isinstance(f, networks.Network)) for f in fl]
def test_get_network(self):
f = cs.networks.get(1)
cs.assert_called('GET', '/os-networks/1')
self.assertTrue(isinstance(f, networks.Network))
def test_delete(self):
cs.networks.delete('networkdelete')
cs.assert_called('DELETE', '/os-networks/networkdelete')
def test_create(self):
f = cs.networks.create(label='foo')
cs.assert_called('POST', '/os-networks',
{'network': {'label': 'foo'}})
self.assertTrue(isinstance(f, networks.Network))
def test_create_allparams(self):
params = {
'label': 'bar',
'bridge': 'br0',
'bridge_interface': 'int0',
'cidr': '192.0.2.0/24',
'cidr_v6': '2001:DB8::/32',
'dns1': '1.1.1.1',
'dns2': '1.1.1.2',
'fixed_cidr': '198.51.100.0/24',
'gateway': '192.0.2.1',
'gateway_v6': '2001:DB8::1',
'multi_host': 'T',
'priority': '1',
'project_id': '1',
'vlan_start': 1,
'vpn_start': 1
}
f = cs.networks.create(**params)
cs.assert_called('POST', '/os-networks', {'network': params})
self.assertTrue(isinstance(f, networks.Network))
def test_associate_project(self):
cs.networks.associate_project('networktest')
cs.assert_called('POST', '/os-networks/add',
{'id': 'networktest'})
def test_associate_host(self):
cs.networks.associate_host('networktest', 'testHost')
cs.assert_called('POST', '/os-networks/networktest/action',
{'associate_host': 'testHost'})
def test_disassociate(self):
cs.networks.disassociate('networkdisassociate')
cs.assert_called('POST',
'/os-networks/networkdisassociate/action',
{'disassociate': None})
def test_disassociate_host_only(self):
cs.networks.disassociate('networkdisassociate', True, False)
cs.assert_called('POST',
'/os-networks/networkdisassociate/action',
{'disassociate_host': None})
def test_disassociate_project(self):
cs.networks.disassociate('networkdisassociate', False, True)
cs.assert_called('POST',
'/os-networks/networkdisassociate/action',
{'disassociate_project': None})
def test_add(self):
cs.networks.add('networkadd')
cs.assert_called('POST', '/os-networks/add',
{'id': 'networkadd'})
|
mit
|
louisgag/BlenderAndMBDyn
|
plot.py
|
2
|
1366
|
# --------------------------------------------------------------------------
# BlenderAndMBDyn
# Copyright (C) 2015 G. Douglas Baldwin - http://www.baldwintechnology.com
# --------------------------------------------------------------------------
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This file is part of BlenderAndMBDyn.
#
# BlenderAndMBDyn is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderAndMBDyn is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderAndMBDyn. If not, see <http://www.gnu.org/licenses/>.
#
# ***** END GPL LICENCE BLOCK *****
# --------------------------------------------------------------------------
import matplotlib.pyplot as plt
import pandas as pd
import sys
with sys.stdin as f:
name = f.readline().strip()
pd.read_csv(f, index_col=0).plot()
plt.gcf().canvas.set_window_title(name)
plt.xlabel("Time (in seconds)")
plt.show()
|
gpl-3.0
|
mshahbaz/exabgp
|
dev/self/load/api-internet.py
|
2
|
2256
|
#!/usr/bin/env python
import sys
import time
import random
def write (data):
sys.stdout.write(data + '\n')
sys.stdout.flush()
def main ():
if len(sys.argv) < 3:
print "%s <number of routes> <updates per second thereafter>" % sys.argv[0]
sys.exit(1)
initial = sys.argv[1]
thereafter = sys.argv[2]
if not initial.isdigit() or not thereafter.isdigit():
write('please give valid numbers')
sys.exit(1)
# Limit to sane numbers :-)
number = int(initial) & 0x00FFFFFF
after = int(thereafter) & 0x0000FFFF
range1 = (number >> 16) & 0xFF
range2 = (number >> 8) & 0xFF
range3 = (number ) & 0xFF
ip = {}
nexthop = ['%d.%d.%d.%d' % (random.randint(1,200),random.randint(0,255),random.randint(0,255),random.randint(0,255)) for _ in range(200)]
for ip1 in range(0,range1):
for ip2 in range(0,256):
for ip3 in range(0,256):
generated = '%d.%d.%d.%d' % (random.randint(1,200),ip1,ip2,ip3)
ip[generated] = random.choice(nexthop)
for ip2 in range (0,range2):
for ip3 in range (0,256):
generated = '%d.%d.%d.%d' % (random.randint(1,200),range1,ip2,ip3)
ip[generated] = random.choice(nexthop)
for ip3 in range (0,range3):
generated = '%d.%d.%d.%d' % (random.randint(1,200),range1,range2,ip3)
ip[generated] = random.choice(nexthop)
count = 0
# initial table dump
for k,v in ip.iteritems():
count += 1
write('announce route %s next-hop %s med 1%02d as-path [ 100 101 102 103 104 105 106 107 108 109 110 ]' % (k,v,len(k)))
if count % 100 == 0:
sys.stderr.write('initial : announced %d\n' % count)
count &= 0xFFFFFFFe
# modify routes forever
while True:
now = time.time()
changed = {}
for k,v in ip.iteritems():
changed[k] = v
if not random.randint(0,after):
break
for k,v in changed.iteritems():
count += 2
write('withdraw route %s next-hop %s med 1%02d as-path [ 100 101 102 103 104 105 106 107 108 109 110 ]' % (k,v,len(k)))
ip[k] = random.choice(nexthop)
write('announce route %s next-hop %s med 1%02d as-path [ 100 101 102 103 104 105 106 107 108 109 110 ]' % (k,ip[k],len(k)))
if count % 100 == 0:
sys.stderr.write('updates : announced %d\n' % count)
time.sleep(time.time()-now+1.0)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
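# Illustrative invocation (not part of the original script; the numbers are arbitrary):
#   ./api-internet.py 5000 100
# announces roughly 5000 routes at startup and then keeps withdrawing and re-announcing
# batches of about 100 routes. ExaBGP consumes the announce/withdraw lines written to
# stdout when the script is configured as an API process.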
|
bsd-3-clause
|
cloudera/hue
|
desktop/core/ext-py/jaeger-client-4.0.0/tests/test_metrics.py
|
2
|
2914
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
from jaeger_client.metrics import MetricsFactory, Metrics,\
LegacyMetricsFactory
def test_metrics_factory_noop():
mf = MetricsFactory()
mf.create_counter('foo')(1)
mf.create_timer('foo')(1)
mf.create_gauge('foo')(1)
def test_metrics_count_func_called():
m = mock.MagicMock()
metrics = Metrics(count=m)
metrics.count('foo', 1)
assert m.call_args == (('foo', 1),)
def test_metrics_timing_func_called():
m = mock.MagicMock()
metrics = Metrics(timing=m)
metrics.timing('foo', 1)
assert m.call_args == (('foo', 1),)
def test_metrics_gauge_func_called():
m = mock.MagicMock()
metrics = Metrics(gauge=m)
metrics.gauge('foo', 1)
assert m.call_args == (('foo', 1),)
def test_metrics_count_func_noops_if_given_uncallable_count_found():
metrics = Metrics(count=123)
metrics.count('foo', 1)
def test_metrics_timing_func_noops_if_given_uncallable_timing_found():
metrics = Metrics(timing=123)
metrics.timing('foo', 1)
def test_metrics_gauge_func_noops_if_given_uncallable_gauge_found():
metrics = Metrics(gauge=123)
metrics.gauge('foo', 1)
def test_legacy_metrics_factory():
cm = mock.MagicMock()
tm = mock.MagicMock()
gm = mock.MagicMock()
mf = LegacyMetricsFactory(Metrics(count=cm, timing=tm, gauge=gm))
counter = mf.create_counter(name='foo', tags={'k': 'v', 'a': 'counter'})
counter(1)
assert cm.call_args == (('foo.a_counter.k_v', 1),)
gauge = mf.create_gauge(name='bar', tags={'k': 'v', 'a': 'gauge'})
gauge(2)
assert gm.call_args == (('bar.a_gauge.k_v', 2),)
timing = mf.create_timer(name='rawr', tags={'k': 'v', 'a': 'timer'})
timing(3)
assert tm.call_args == (('rawr.a_timer.k_v', 0.003),)
mf = LegacyMetricsFactory(Metrics(timing=tm))
timing = mf.create_timer(name='wow')
timing(4)
assert tm.call_args == (('wow', 0.004),), \
'building a timer with no tags should work'
def test_legacy_metrics_factory_noop():
mf = LegacyMetricsFactory(Metrics())
counter = mf.create_counter(name='foo', tags={'a': 'counter'})
counter(1)
gauge = mf.create_gauge(name='bar', tags={'a': 'gauge'})
gauge(2)
timing = mf.create_timer(name='rawr', tags={'a': 'timer'})
timing(3)
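def test_metrics_with_dict_backend():
    # Illustrative sketch (not part of the original suite): Metrics only needs
    # callables with a (key, value) signature, so a plain dict can serve as a
    # throwaway backend for counts and gauges.
    recorded = {}
    metrics = Metrics(count=lambda key, value: recorded.__setitem__(key, value),
                      gauge=lambda key, value: recorded.__setitem__(key, value))
    metrics.count('foo', 1)
    metrics.gauge('bar', 2)
    assert recorded == {'foo': 1, 'bar': 2}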
|
apache-2.0
|
GoogleCloudPlatform/err-stackdriver
|
charts/timeseries.py
|
1
|
8301
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pulls data from the Cloud Monitoring Timeseries API."""
from datetime import timedelta, datetime
import enum
import json
from apiclient import discovery
from oauth2client.client import GoogleCredentials
def _format_frequency(time_delta):
return '{}s'.format(time_delta.total_seconds())
def _alignment_period_string_to_delta(string):
"""Turns an alignment period string into a timedelta.
Args:
string: (str) The alignment period string. Described in:
cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list
Described as: "A duration in seconds with up to nine fractional digits,
terminated by 's'." Example: "5s" meaning "5 seconds".
"""
num_seconds = float(string[:-1])
return timedelta(seconds=num_seconds)
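# Illustrative round trip for the two helpers above (not part of the original module;
# the values shown are simply what the functions return):
#   _format_frequency(timedelta(minutes=1))        -> '60.0s'
#   _alignment_period_string_to_delta('60.0s')     -> timedelta(seconds=60)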
class AlignmentPeriods(enum.Enum):
"""The alignment period for per-time series alignment.
If present, alignmentPeriod must be at least 60 seconds.
After per-time series alignment, each time series will contain data points
only on the period boundaries.
"""
# Note that MINUTES_1 is the minimum allowed alignment period.
MINUTES_1 = _format_frequency(timedelta(minutes=1))
MINUTES_5 = _format_frequency(timedelta(minutes=5))
MINUTES_10 = _format_frequency(timedelta(minutes=10))
MINUTES_15 = _format_frequency(timedelta(minutes=15))
MINUTES_20 = _format_frequency(timedelta(minutes=20))
MINUTES_30 = _format_frequency(timedelta(minutes=30))
HOURS_1 = _format_frequency(timedelta(hours=1))
HOURS_2 = _format_frequency(timedelta(hours=2))
HOURS_3 = _format_frequency(timedelta(hours=3))
HOURS_4 = _format_frequency(timedelta(hours=4))
HOURS_6 = _format_frequency(timedelta(hours=6))
HOURS_12 = _format_frequency(timedelta(hours=12))
HOURS_24 = _format_frequency(timedelta(hours=24))
class PerSeriesAligners(enum.Enum):
"""Brings the data points in a single time series into temporal alignment.
See: cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list#Aligner
"""
NONE = 'ALIGN_NONE'
DELTA = 'ALIGN_DELTA'
RATE = 'ALIGN_RATE'
INTERPOLATE = 'ALIGN_INTERPOLATE'
NEXT_OLDER = 'ALIGN_NEXT_OLDER'
MIN = 'ALIGN_MIN'
MAX = 'ALIGN_MAX'
MEAN = 'ALIGN_MEAN'
COUNT = 'ALIGN_COUNT'
SUM = 'ALIGN_SUM'
STDDEV = 'ALIGN_STDDEV'
COUNT_TRUE = 'ALIGN_COUNT_TRUE'
FRACTION_TRUE = 'ALIGN_FRACTION_TRUE'
class Client(object):
def __init__(self, monitoring_api_client):
self._monitoring_api_client = monitoring_api_client
def list_timeseries(self,
project_id: str,
metric: str,
start_time: datetime,
end_time: datetime,
alignment_period: str=AlignmentPeriods.MINUTES_1.value,
per_series_aligner: str=PerSeriesAligners.MAX.value):
"""Lists time series.
Args:
project_id: E.g., "walkshare-monitor".
metric: E.g., "compute.googleapis.com/instance/cpu/usage_time".
See https://cloud.google.com/monitoring/api/metrics for more.
start_time: A timezone-naive datetime object.
Represents the datetime, in UTC, of the first moment to look at.
The "moment" may include a summary of what happened from the N-1th
moment, up until and including the given "start_time". E.g., if start_time
is 17:15, we will show a number for the x-value 17:15 that says either
"this is what is happening at exactly 17:15", or "this is what happened
            between (17:14 and 17:15]". The former only applies when per_series_aligner
is either None or "ALIGN_NONE".
In any case, if you want to know what the world was looking like
starting at 17:15, you should supply 17:15.
end_time: A timezone-naive datetime object.
Represents the datetime, in UTC, of the final moment to look at.
The "moment" may include a summary of what happened from the N-1th
moment, up until and including the given "end_time". E.g., if end_time
is 19:25, we will show a number for the x-value 19:25 that says either
"this is what is happening at exactly 19:25", or "this is what happened
            between (19:24 and 19:25]". The former only applies when per_series_aligner
is either None or "ALIGN_NONE".
In any case, if you want to know what the world was looking like
ending at 18:35, you should supply 18:35.
alignment_period: The size of each timeseries data point bucket.
I.e., if you supply '5s', each bucket contains 5 seconds of data,
rounded off to the nearest 5 second window.
If perSeriesAligner is None or equals ALIGN_NONE,
then this field is ignored.
If perSeriesAligner is specified and does not equal ALIGN_NONE,
then this field must be defined.
Defaults to minutely.
per_series_aligner: The per-series aligner to use.
Defaults to "ALIGN_MAX".
Returns:
timeSeries API response as documented here:
cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list
"""
out = []
each_value_represents_a_time_bucket = (
per_series_aligner and
per_series_aligner != PerSeriesAligners.NONE.value
)
if each_value_represents_a_time_bucket:
bucket_delta = _alignment_period_string_to_delta(alignment_period)
start_time -= bucket_delta
default_request_kwargs = dict(
name='projects/{}'.format(project_id),
filter='metric.type="{}"'.format(metric),
pageSize=10000,
interval_startTime=_RFC3339(start_time),
interval_endTime=_RFC3339(end_time)
)
if alignment_period:
default_request_kwargs['aggregation_alignmentPeriod'] = alignment_period
if per_series_aligner:
default_request_kwargs['aggregation_perSeriesAligner'] = per_series_aligner
def _do_request(next_page_token=None):
kwargs = default_request_kwargs.copy()
if next_page_token:
kwargs['pageToken'] = next_page_token
req = self._monitoring_api_client.projects().timeSeries().list(**kwargs)
return req.execute()
response = _do_request()
out.extend(response.get('timeSeries', []))
next_token = response.get('nextPageToken')
while next_token:
response = _do_request(next_token)
out.extend(response.get('timeSeries', []))
next_token = response.get('nextPageToken')
return out
def _RFC3339(my_datetime):
return my_datetime.isoformat("T") + "Z"
def new_client(credentials=None):
if not credentials:
credentials = GoogleCredentials.get_application_default()
monitoring = discovery.build('monitoring', 'v3', credentials=credentials)
return Client(monitoring)
if __name__ == '__main__':
client = new_client()
res = client.list_timeseries(
project_id='walkshare-monitor',
metric='appengine.googleapis.com/system/memory/usage',
# 'compute.googleapis.com/instance/cpu/usage_time', # "compute.googleapis.com/instance/disk/read_bytes_count",
start_time=datetime.utcnow() - timedelta(hours=2),
end_time=datetime.utcnow(),
per_series_aligner=PerSeriesAligners.MAX.value,
alignment_period=AlignmentPeriods.MINUTES_1.value,
)
print(json.dumps(res))
|
apache-2.0
|
phil-r/chaos
|
tests/github_api/prs_test.py
|
1
|
1520
|
import unittest
from github_api import prs, API
class TestPRMethods(unittest.TestCase):
def test_statuses_returns_passed_travis_build(self):
statuses = [{"state": "success",
"description": "The Travis CI build passed"}]
pr = "/repos/test/blah"
class Mocked(API):
def __call__(m, method, path, **kwargs):
self.assertEqual(pr, path)
return statuses
api = Mocked("user", "pat")
url = "{}{}".format(api.BASE_URL, pr)
self.assertTrue(prs.has_build_passed(api, url))
def test_statuses_returns_failed_travis_build(self):
statuses = [{"state": "error",
"description": "The Travis CI build failed"}]
pr = "/repos/test/blah"
class Mocked(API):
def __call__(m, method, path, **kwargs):
self.assertEqual(pr, path)
return statuses
api = Mocked("user", "pat")
url = "{}{}".format(api.BASE_URL, pr)
self.assertFalse(prs.has_build_passed(api, url))
statuses = [{"state": "pending",
"description": "The Travis CI build is in progress"}]
pr = "/repos/test/blah"
class Mocked(API):
def __call__(m, method, path, **kwargs):
self.assertEqual(pr, path)
return statuses
api = Mocked("user", "pat")
url = "{}{}".format(api.BASE_URL, pr)
self.assertFalse(prs.has_build_passed(api, url))
|
mit
|
wardi/ckanext-bcgov
|
ckanext/bcgov/scripts/save_users.py
|
6
|
1093
|
# Copyright 2015, Province of British Columbia
# License: https://github.com/bcgov/ckanext-bcgov/blob/master/license
'''
This script imports the current users from an environment and
saves them to a JSON file.
This information can be used later to create the users on the same
or another environment.
'''
import json
import urllib2
import urllib
from base import (site_url, api_key)
import pprint
#ckan.logic.action.create.organization_member_create(context, data_dict)
#1) Get the list of all users
user_list = []
try:
request = urllib2.Request(site_url + '/api/3/action/user_list')
request.add_header('Authorization', api_key)
response = urllib2.urlopen(request)
assert response.code == 200
response_dict = json.loads(response.read())
assert response_dict['success'] is True
user_list = response_dict['result']
# pprint.pprint(user_list)
except Exception, e:
pass
#2) For each user find the list organizations and the user role in each org
user_file = open('./data/users_list.json', 'w')
user_file.write(json.dumps(user_list))
user_file.close()
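# Illustrative companion step (not part of this script): the dump written above can be
# read back later by a script that re-creates the users on another environment, e.g.:
#
# with open('./data/users_list.json') as f:
#     saved_users = json.load(f)
# for user in saved_users:
#     print user['name']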
|
agpl-3.0
|
ClearCorp-dev/odoo
|
addons/account_accountant/__init__.py
|
892
|
1046
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
boneknuckleskin/libforensics
|
code/lf/win/shell/link/objects.py
|
13
|
52049
|
# Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Objects for working with shell link files"""
# stdlib imports
from codecs import utf_16_le_decode as _utf16_le_decoder
# local imports
from lf.dec import SEEK_SET
from lf.dtypes import (
LITTLE_ENDIAN, ActiveStructuple, CtypesWrapper, Structuple
)
from lf.dtypes.ctypes import uint32_le, uint16_le
from lf.time import FILETIMETodatetime
from lf.win.objects import GUIDToUUID, CLSIDToUUID, LCID
from lf.win.con.objects import COORD
from lf.win.shell.objects import ITEMIDLIST
from lf.win.shell.link.ctypes import (
shell_link_header, link_info_header, volume_id_header, cnrl_header,
console_data_block, known_folder_data_block, special_folder_data_block,
tracker_data_block, tracker_data_block_footer, data_block_header,
file_attributes, link_flags, console_fe_data_block, darwin_data_block,
expandable_strings_data_block
)
from lf.win.shell.link.consts import (
CONSOLE_PROPS_SIG, CONSOLE_FE_PROPS_SIG, DARWIN_PROPS_SIG,
ENVIRONMENT_PROPS_SIG, ICON_ENVIRONMENT_PROPS_SIG, KNOWN_FOLDER_PROPS_SIG,
PROPERTY_STORE_PROPS_SIG, SHIM_PROPS_SIG, SPECIAL_FOLDER_PROPS_SIG,
TRACKER_PROPS_SIG, VISTA_AND_ABOVE_IDLIST_PROPS_SIG
)
__docformat__ = "restructuredtext en"
__all__ = [
"ShellLink", "FileAttributes", "LinkFlags", "ShellLinkHeader",
"StringData", "LinkInfo", "VolumeID", "CNRL", "ExtraDataBlock",
"ConsoleProps", "ConsoleFEProps", "DarwinProps",
"ExpandableStringsDataBlock", "EnvironmentProps", "IconEnvironmentProps",
"KnownFolderProps", "PropertyStoreProps", "ShimProps",
"SpecialFolderProps", "DomainRelativeObjId", "TrackerProps",
"VistaAndAboveIDListProps", "TerminalBlock", "ExtraDataBlockFactory",
"StringDataSet"
]
class ShellLink():
"""Represents a shell link (.lnk) file.
.. attribute:: header
A :class:`ShellLinkHeader` object.
.. attribute:: idlist
An :class:`~lf.win.shell.objects.ITEMIDLIST` describing the target (or
None if not present).
.. attribute:: link_info
A :class:`LinkInfo` object (or None if not present).
.. attribute:: string_data
An instance of a :class:`StringDataSet` object.
.. attribute:: extra_data
A list of :class:`ExtraDataBlock` objects.
"""
def __init__(self, stream, offset=None):
"""Initializes a ShellLink object.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the link file.
:type offset: ``int``
:param offset: The start of the link file, in :attr:`stream`.
"""
if offset is None:
offset = stream.tell()
# end if
header = ShellLinkHeader.from_stream(stream, offset)
offset += header.size
flags = header.flags
is_unicode = flags.is_unicode
if flags.has_idlist:
stream.seek(offset, SEEK_SET)
id_list_size = uint16_le.from_buffer_copy(stream.read(2)).value
offset += 2
id_list = ITEMIDLIST.from_stream(stream, offset, id_list_size)
offset += id_list_size
else:
id_list = None
# end if
if flags.has_link_info:
link_info = LinkInfo.from_stream(stream, offset)
offset += link_info.size
else:
link_info = None
# end if
if flags.has_name:
name_str = StringData.from_stream(stream, offset, is_unicode)
offset += name_str.size
else:
name_str = None
# end if
if flags.has_relative_path:
rel_path = StringData.from_stream(stream, offset, is_unicode)
offset += rel_path.size
else:
rel_path = None
# end if
if flags.has_working_dir:
working_dir = StringData.from_stream(stream, offset, is_unicode)
offset += working_dir.size
else:
working_dir = None
# end if
if flags.has_args:
cmd_args = StringData.from_stream(stream, offset, is_unicode)
offset += cmd_args.size
else:
cmd_args = None
# end if
if flags.has_icon_location:
icon_location = StringData.from_stream(stream, offset, is_unicode)
offset += icon_location.size
else:
icon_location = None
# end if
string_data = StringDataSet((
name_str, rel_path, working_dir, cmd_args, icon_location
))
extra_data = list(ExtraDataBlockFactory.make_blocks(stream, offset))
self.header = header
self.idlist = id_list
self.link_info = link_info
self.string_data = string_data
self.extra_data = extra_data
# end def __init__
# end class ShellLink
class StringDataSet(Structuple):
"""Represents a collection of :class:`StringData` objects.
.. attribute:: name_str
A :class:`StringData` object describing the shortcut (or ``None`` if
not present).
.. attribute:: rel_path
A :class:`StringData` object describing the path to the target,
relative to the file that contains the link (or ``None`` if not
present).
.. attribute:: working_dir
A :class:`StringData` object describing the working directory to use
when activating/running the target (or ``None`` if not present).
.. attribute:: cmd_args
A :class:`StringData` object describing the command line arguments to
use when activating/running the target (or ``None`` if not present).
.. attribute:: icon_location
A :class:`StringData` object describing the location of the icon to
display for the link file (or ``None`` if not present).
"""
_fields_ = (
"name_str", "rel_path", "working_dir", "cmd_args", "icon_location"
)
__slots__ = ()
# end class StringDataSet
class StringData(ActiveStructuple):
"""Represents a StringData structure.
.. attribute:: size
The size of the :class:`StringData` structure in bytes.
.. attribute:: char_count
The number of characters in the string.
.. attribute:: string
The string associated with the structure.
"""
_fields_ = (
"size", "char_count", "string"
)
_takes_stream = True
@classmethod
def from_stream(cls, stream, offset=None, is_unicode=True):
"""Creates a :class:`StringData` object from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:type is_unicode: ``bool``
:param is_unicode: If the string is in unicode (utf16-le)
:rtype: :class:`StringData`
:returns: The corresponding :class:`StringData` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
else:
offset = stream.tell()
# end if
char_count = uint16_le.from_buffer_copy(stream.read(2)).value
offset += 2
if is_unicode:
read_size = char_count * 2
else:
read_size = char_count
# end if
string = stream.read(read_size)
if is_unicode:
new_string = _utf16_le_decoder(string, "ignore")[0]
if new_string:
string = new_string
# end if
# end if
return cls((read_size + 2, char_count, string))
# end def from_stream
# end class StringData
class FileAttributes(CtypesWrapper):
""" Represents the file system attributes of a link target.
.. attribute:: read_only
True if the target is read only.
.. attribute:: hidden
True if the target is hidden.
.. attribute:: system
True if the target has the system attribute set.
.. attribute:: directory
True if the target is a directory.
.. attribute:: archive
True if the target has the archive attribute set.
.. attribute:: normal
True if this is the only bit set.
.. attribute:: temp
True if the target is a temp file.
.. attribute:: sparse
True if the target is a sparse file.
.. attribute:: reparse_point
True if the target is a reparse_point.
.. attribute:: compressed
True if the target is compressed.
.. attribute:: offline
True if the content of the target is not immediately available.
.. attribute:: not_content_indexed
True if the content of the target needs indexing.
.. attribute:: encrypted
True if the target is encrypted.
"""
_fields_ = (
"read_only", "hidden", "system", "reserved1", "directory", "archive",
"reserved2", "normal", "temp", "sparse", "reparse_point", "compressed",
"offline", "not_content_indexed", "encrypted"
)
_ctype_ = file_attributes
__slots__ = tuple()
# end class FileAttributes
class LinkFlags(CtypesWrapper):
"""Represents the LinkFlags structure from :class:`ShellLinkHeader`.
.. attribute:: has_idlist
True if the link has an :class:`~lf.win.shell.objects.ITEMIDLIST` for
the target.
.. attribute:: has_link_info
True if the link has a LinkInfo structure.
.. attribute:: has_name
True if the link has a NAME_STRING StringData structure.
.. attribute:: has_relative_path
True if the link has a RELATIVE_PATH StringData structure.
.. attribute:: has_working_dir
True if the link has a WORKING_DIR StringData structure.
.. attribute:: has_args
True if the link has a COMMAND_LINE_ARGUMENTS StringData structure.
.. attribute:: has_icon_location
True if the link has an ICON_LOCATION StringData structure.
.. attribute:: is_unicode
True if the link has unicode encoded strings.
.. attribute:: force_no_link_info
True if the LinkInfo structure should be ignored.
.. attribute:: has_exp_string
True if the link has an EnvironmentVariableDataBlock structure.
.. attribute:: run_in_separate_proc
True if the target is run in a separate VM.
.. attribute:: has_logo3_id
Undefined.
.. attribute:: has_darwin_id
True if the link has a DarwinDataBlock structure.
.. attribute:: run_as_user
True if the target is run as a different user.
.. attribute:: has_exp_icon
True if the link has an IconEnvironmentDataBlock structure.
.. attribute:: no_pidl_alias
        True if the file system location is represented in the shell
namespace.
.. attribute:: force_unc_name
True if UNC naming is required.
.. attribute:: run_with_shim_layer
True if the link has a ShimDataBlock structure.
.. attribute:: force_no_link_track
True if the TrackerDataBlock structure should be ignored.
.. attribute:: enable_target_metadata
True if the link has metadata about the target.
.. attribute:: disable_link_path_tracking
True if the EnvironmentVariableDataBlock structure should be ignored.
.. attribute:: disable_known_folder_rel_tracking
True if the SpecialFolderDataBlock and the KnownFolderDataBlock
structures should be ignored.
.. attribute:: no_kf_alias
True if the unaliased form of the known folder ID list should be used.
.. attribute:: allow_link_to_link
True if the target can be another link.
.. attribute:: unalias_on_save
True if unaliased form should be used when saving a link.
.. attribute:: prefer_environment_path
True if path specified in the EnvironmentVariableDataBlock should be
used to refer to the target.
.. attribute:: keep_local_idlist_for_unc_target
True if the local path IDlist should be stored.
"""
_fields_ = (
"has_idlist", "has_link_info", "has_name", "has_relative_path",
"has_working_dir", "has_args", "has_icon_location", "is_unicode",
"force_no_link_info", "has_exp_string", "run_in_separate_proc",
"has_logo3_id", "has_darwin_id", "run_as_user", "has_exp_icon",
"no_pidl_alias", "force_unc_name", "run_with_shim_layer",
"force_no_link_track", "enable_target_metadata",
"disable_link_path_tracking", "disable_known_folder_rel_tracking",
"no_kf_alias", "allow_link_to_link", "unalias_on_save",
"prefer_environment_path", "keep_local_idlist_for_unc_target"
)
_ctype_ = link_flags
__slots__ = tuple()
# end class LinkFlags
class ShellLinkHeader(ActiveStructuple):
"""Represents a header from a shell link (.lnk) file.
.. attribute:: size
The size of the header structure
.. attribute:: clsid
The CLSID of the link.
.. attribute:: flags
An instance of :class:`LinkFlags` describing the flags for the shell
link header.
.. attribute:: attrs
An instance of :class:`FileAttributes` describing the file attributes
for the target.
.. attribute:: btime
The creation time of the target.
.. attribute:: atime
The last access time of the target.
.. attribute:: mtime
The last modification time of the target.
.. attribute:: target_size
The size of the target.
.. attribute:: icon_index
The index of an icon.
.. attribute:: show_cmd
The state of the window, if one is launched.
.. attribute:: vkcode
The virtual keycode of the hotkey, used to activate the link.
.. attribute:: vkmod
The modifiers to vkcode.
"""
_fields_ = (
"size", "clsid", "flags", "attrs", "btime", "atime", "mtime",
"target_size", "icon_index", "show_cmd", "vkcode", "vkmod"
)
_takes_stream = True
_takes_ctype = True
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`ShellLinkHeader` object from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`ShellLinkHeader`
:returns: The corresponding :class:`ShellLinkHeader` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
else:
offset = stream.tell()
# end if
data = stream.read(76)
return cls.from_ctype((shell_link_header.from_buffer_copy(data)))
# end def from_stream
@classmethod
def from_ctype(cls, ctype):
"""Creates a :class:`ShellLinkHeader` object from a ctype.
:type ctype: :class:`lf.win.shell.dtypes.ShellLinkHeader`
:param ctype: An instance of a ShellLinkHeader ctype.
:rtype: :class:`ShellLinkHeader`
:returns: The corresponding :class:`ShellLinkHeader` object.
"""
clsid = CLSIDToUUID.from_ctype(ctype.clsid)
try:
btime = FILETIMETodatetime.from_ctype(ctype.btime)
except (ValueError, TypeError):
btime = ctype.btime
# end try
try:
atime = FILETIMETodatetime.from_ctype(ctype.atime)
except (ValueError, TypeError):
atime = ctype.atime
# end try
try:
mtime = FILETIMETodatetime.from_ctype(ctype.mtime)
except (ValueError, TypeError):
mtime = ctype.mtime
# end try
attrs = FileAttributes.from_ctype(ctype.attrs)
flags = LinkFlags.from_ctype(ctype.flags)
return cls((
ctype.size, clsid, flags, attrs, btime, atime, mtime,
ctype.target_size, ctype.icon_index, ctype.show_cmd,
ctype.hotkey.vkcode, ctype.hotkey.vkmod
))
# end def from_ctype
# end class ShellLinkHeader
class LinkInfo(ActiveStructuple):
"""Represents a LinkInfo structure.
.. attribute:: size
The size of the structure.
.. attribute:: header_size
The size of the :class:`LinkInfo` header.
.. attribute:: vol_id_and_local_base_path
Describes if the volume id and local base path are present.
.. attribute:: cnrl_and_path_suffix
Describes if the Common Network Relative Link field is present.
.. attribute:: vol_id_offset
The relative offset of the :class:`VolumeID` structure.
.. attribute:: local_base_path_offset
The relative offset of the local base path.
.. attribute:: cnrl_offset
The relative offset of the CNRL.
.. attribute:: path_suffix_offset
The relative offset of the common path suffix.
.. attribute:: local_base_path_offset_uni
The unicode version of :attr:`local_base_path_offset` (or ``None`` if
not present).
.. attribute:: path_suffix_offset_uni
The unicode version of :attr:`path_suffix_offset` (or ``None`` if not
present).
.. attribute:: vol_id
The :class:`VolumeID` structure (or ``None`` if not present).
.. attribute:: cnrl
The :class:`CNRL` structure (or ``None`` if not present).
.. attribute:: local_base_path
The local path prefix (or ``None`` if not present).
.. attribute:: local_base_path_uni
The unicode version of :attr:`local_base_path` (or ``None`` if not
present).
.. attribute:: path_suffix
The field appended to :attr:`local_base_path` (or ``None`` if not
present).
.. attribute:: path_suffix_uni
The unicode version of :attr:`path_suffix` (or ``None`` if not
present).
"""
_fields_ = (
"size", "header_size", "vol_id_and_local_base_path",
"cnrl_and_path_suffix", "vol_id_offset", "local_base_path_offset",
"cnrl_offset", "path_suffix_offset", "local_base_path_offset_uni",
"path_suffix_offset_uni", "vol_id", "cnrl", "local_base_path",
"local_base_path_uni", "path_suffix", "path_suffix_uni"
)
_takes_stream = True
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`LinkInfo` object from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`LinkInfo`
:returns: The corresponding :class:`LinkInfo` object.
"""
decoder = _utf16_le_decoder
if offset is not None:
stream.seek(offset, SEEK_SET)
else:
offset = stream.tell()
# end if
header = link_info_header.from_buffer_copy(stream.read(28))
size = header.size
vol_id_offset = header.vol_id_offset
local_base_path_offset = header.local_base_path_offset
cnrl_offset = header.cnrl_offset
path_suffix_offset = header.path_suffix_offset
if header.header_size >= 0x24:
local_base_path_offset_uni = \
uint32_le.from_buffer_copy(stream.read(4)).value
path_suffix_offset_uni = \
uint32_le.from_buffer_copy(stream.read(4)).value
else:
local_base_path_offset_uni = None
path_suffix_offset_uni = None
# end if
if header.has_vol_id_and_local_base_path:
vol_id = VolumeID.from_stream(stream, offset + vol_id_offset)
if local_base_path_offset:
new_offset = offset + local_base_path_offset
stream.seek(new_offset, SEEK_SET)
local_base_path = stream.read(size - local_base_path_offset)
local_base_path = local_base_path.split(b"\x00", 1)[0]
else:
local_base_path = None
# end if
if local_base_path_offset_uni:
new_offset = offset + local_base_path_offset_uni
read_size = size - local_base_path_offset_uni
                stream.seek(new_offset, SEEK_SET)
local_base_path_uni = stream.read(read_size)
new_local_base_path_uni = \
decoder(local_base_path_uni, "ignore")[0]
if new_local_base_path_uni:
local_base_path_uni = \
new_local_base_path_uni.split("\x00", 1)[0]
# end if
else:
local_base_path_uni = None
# end if
else:
vol_id = None
local_base_path = None
local_base_path_uni = None
# end if
if header.has_cnrl_and_path_suffix:
cnrl = CNRL.from_stream(stream, offset + cnrl_offset)
else:
cnrl = None
# end if
if path_suffix_offset:
new_offset = offset + path_suffix_offset
stream.seek(new_offset)
path_suffix = stream.read(size - path_suffix_offset)
path_suffix = path_suffix.split(b"\x00", 1)[0]
else:
path_suffix = None
# end if
if path_suffix_offset_uni:
new_offset = offset + path_suffix_offset_uni
stream.seek(new_offset)
path_suffix_uni = stream.read(size - path_suffix_offset_uni)
new_path_suffix_uni = decoder(path_suffix_uni, "ignore")[0]
if new_path_suffix_uni:
path_suffix_uni = new_path_suffix_uni.split("\x00", 1)[0]
# end if
else:
path_suffix_uni = None
# end if
return cls((
header.size, header.header_size,
header.has_vol_id_and_local_base_path,
header.has_cnrl_and_path_suffix, vol_id_offset,
local_base_path_offset, cnrl_offset, path_suffix_offset,
local_base_path_offset_uni, path_suffix_offset_uni, vol_id, cnrl,
local_base_path, local_base_path_uni, path_suffix, path_suffix_uni
))
# end def from_stream
# end class LinkInfo
class VolumeID(ActiveStructuple):
""" Represents a VolumeID structure.
.. attribute:: size
The size of the volume id structure.
.. attribute:: drive_type
The type of drive the target is stored on.
.. attribute:: drive_serial_num
The serial number of the drive the target is on.
.. attribute:: volume_label_offset
The relative offset of the volume label.
.. attribute:: volume_label_offset_uni
The unicode version of :attr:`volume_label_offset` (or ``None`` if not
present).
.. attribute:: volume_label
The volume label of the drive the target is on.
"""
_fields_ = (
"size", "drive_type", "drive_serial_num", "volume_label_offset",
"volume_label_offset_uni", "volume_label"
)
_takes_stream = True
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`VolumeID` object from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`VolumeID`
:returns: The corresponding :class:`VolumeID` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
else:
offset = stream.tell()
# end if
header = volume_id_header.from_buffer_copy(stream.read(16))
size = header.size
volume_label = None
if header.vol_label_offset == 0x14:
# Volume label is unicode
vol_label_offset_uni = \
uint32_le.from_buffer_copy(stream.read(4)).value
if vol_label_offset_uni:
new_offset = vol_label_offset_uni + offset
stream.seek(new_offset, SEEK_SET)
volume_label = stream.read(header.size - vol_label_offset_uni)
new_volume_label = _utf16_le_decoder(volume_label, "ignore")[0]
if new_volume_label:
volume_label = new_volume_label.split("\x00", 1)[0]
# end if
# end if
else:
vol_label_offset_uni = None
new_offset = header.vol_label_offset + offset
stream.seek(new_offset, SEEK_SET)
volume_label = stream.read(size - 16)
volume_label = volume_label.split(b"\x00", 1)[0]
# end if
return cls((
header.size, header.type, header.serial_num,
header.vol_label_offset, vol_label_offset_uni, volume_label
))
# end def from_stream
# end class VolumeID
class CNRL(ActiveStructuple):
"""Represents a Common Network Relative Link structure.
.. attribute:: size
The size of the CNRL structure.
.. attribute:: valid_device
True if :attr:`device_name_offset` is valid.
.. attribute:: valid_net_type
True if :attr:`net_provider_type` is valid.
.. attribute:: net_name_offset
The relative offset of the :attr:`net_name` field.
.. attribute:: device_name_offset
The relative offset of the :attr:`device_name` field.
    .. attribute:: net_type
Describes the type of network provider. See :mod:`lf.win.consts.net`
for a list of valid network provider type constants.
.. attribute:: net_name_offset_uni
The unicode version of :attr:`net_name_offset`.
.. attribute:: device_name_offset_uni
The unicode version of :attr:`device_name_offset`.
.. attribute:: net_name
Specifies the server path.
.. attribute:: device_name
Specifies the device.
.. attribute:: net_name_uni
The unicode version of :attr:`net_name`.
.. attribute:: device_name_uni
The unicode version of :attr:`device_name`.
"""
_fields_ = (
"size", "valid_device", "valid_net_type", "net_name_offset",
"device_name_offset", "net_type", "net_name_offset_uni",
"device_name_offset_uni", "net_name", "device_name", "net_name_uni",
"device_name_uni"
)
    _takes_stream = True
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`CNRL` object from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`CNRL`
:returns: The corresponding :class:`CNRL` object.
"""
decoder = _utf16_le_decoder
if offset is not None:
stream.seek(offset, SEEK_SET)
else:
offset = stream.tell()
# end if
header = cnrl_header.from_buffer_copy(stream.read(20))
size = header.size
net_name_offset = header.net_name_offset
device_name_offset = header.device_name_offset
valid_device = header.valid_device
if net_name_offset > 0x14:
net_name_offset_uni = \
uint32_le.from_buffer_copy(stream.read(4)).value
if valid_device:
device_name_offset_uni = \
uint32_le.from_buffer_copy(stream.read(4)).value
else:
device_name_offset_uni = None
# end if
else:
net_name_offset_uni = None
device_name_offset_uni = None
# end if
if net_name_offset_uni:
new_offset = offset + net_name_offset_uni
stream.seek(new_offset, SEEK_SET)
net_name_uni = stream.read(size - net_name_offset_uni)
new_net_name_uni = decoder(net_name_uni, "ignore")[0]
if new_net_name_uni:
net_name_uni = new_net_name_uni.split("\x00", 1)[0]
# end if
else:
net_name_uni = None
# end if
if device_name_offset_uni:
new_offset = offset + device_name_offset_uni
stream.seek(new_offset, SEEK_SET)
device_name_uni = stream.read(size - device_name_offset_uni)
new_device_name_uni = decoder(device_name_uni, "ignore")[0]
if new_device_name_uni:
device_name_uni = new_device_name_uni.split("\x00", 1)[0]
# end if
else:
device_name_uni = None
# end if
if valid_device and device_name_offset:
new_offset = device_name_offset + offset
stream.seek(new_offset, SEEK_SET)
device_name = stream.read(size - device_name_offset)
device_name = device_name.split(b"\x00", 1)[0]
else:
device_name = None
# end if
if net_name_offset:
new_offset = header.net_name_offset + offset
stream.seek(new_offset, SEEK_SET)
net_name = stream.read(size - net_name_offset)
net_name = net_name.split(b"\x00", 1)[0]
else:
net_name = None
# end if
return cls((
size, valid_device, header.valid_net_type, net_name_offset,
device_name_offset, header.net_type, net_name_offset_uni,
device_name_offset_uni, net_name, device_name, net_name_uni,
device_name_uni
))
# end def from_stream
# end class CNRL
class ExtraDataBlock(ActiveStructuple):
"""Base class for :class:`ExtraDataBlock` subclasses.
.. attribute:: size
The size of the structure in bytes.
.. attribute:: sig
The signature field.
.. attribute:: data
An optional field that describes the data in the structure.
.. note::
Subclasses set this to ``None``
"""
_fields_ = (
"size", "sig", "data"
)
_takes_stream = True
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`ExtraDataBlock` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`ExtraDataBlock`
:returns: The corresponding :class:`ExtraDataBlock` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
else:
offset = stream.tell()
# end if
header = data_block_header.from_buffer_copy(stream.read(8))
offset += 8
if header.size >= 8:
data_size = header.size - 8
data = stream.read(data_size)
else:
data = None
# end if
return cls((header.size, header.sig, data))
# end def from_stream
# end class ExtraDataBlock
class ConsoleProps(ExtraDataBlock):
"""Represents a ConsoleProps structure.
.. attribute:: fill_attributes
The foreground and background text colors for the console window.
.. attribute:: popup_fill_attributes
The foreground and background text colors for the console window popup.
.. attribute:: screen_buffer_size
A :class:`~lf.win.con.objects.COORD` object describing the dimensions
of the console window buffer.
.. attribute:: window_size
A :class:`~lf.win.con.objects.COORD` object describing the dimensions
of the console window.
.. attribute:: window_origin
A :class:`~lf.win.con.objects.COORD` object describing the console
window origin.
.. attribute:: font
The font.
.. attribute:: input_buf_size
The size of the input buffer.
.. attribute:: font_size
The size (in pixels) of the font to use in the console window.
.. attribute:: font_family
The family of the font to use in the console window.
.. attribute:: font_weight
The stroke weight of the font to use in the console window.
.. attribute:: face_name
The face name of the font to use in the console window.
.. attribute:: cursor_size
The size of the cursor (in pixels) to use in the console window.
.. attribute:: full_screen
Whether or not to open the console window in full screen mode.
.. attribute:: quick_edit
True if the console window should be in quick edit mode.
.. attribute:: insert_mode
Whether or not to enable insert mode in the console window.
    .. attribute:: auto_position
Whether or not to automatically position the console window.
    .. attribute:: history_buf_size
        The number of characters to store in the history of the console window.
    .. attribute:: history_buf_count
        The number of history buffers to use for the console window.
.. attribute:: history_no_dup
Whether or not duplicates are stored in the history buffers.
.. attribute:: color_table
A tuple of the RGB colors used for text in the console window.
"""
_fields_ = (
"fill_attributes", "popup_fill_attributes", "screen_buffer_size",
"window_size", "window_origin", "font", "input_buf_size", "font_size",
"font_family", "font_weight", "face_name", "cursor_size",
"full_screen", "quick_edit", "insert_mode", "auto_position",
"history_buf_size", "history_buf_count", "history_no_dup",
"color_table", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`ConsoleProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`ConsoleProps`
:returns: The corresponding :class:`ConsoleProps` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
# end if
cdb = console_data_block.from_buffer_copy(stream.read(204))
face_name = bytes(cdb.face_name)
new_face_name = _utf16_le_decoder(face_name, "ignore")[0]
if new_face_name:
face_name = new_face_name.split("\x00", 1)[0]
# end if
screen_buffer_size = COORD.from_ctype(cdb.screen_buffer_size)
window_size = COORD.from_ctype(cdb.window_size)
window_origin = COORD.from_ctype(cdb.window_origin)
return cls((
cdb.size, cdb.sig, cdb.fill_attributes, cdb.popup_fill_attributes,
screen_buffer_size, window_size, window_origin, cdb.font,
cdb.input_buf_size, cdb.font_size, cdb.font_family,
cdb.font_weight, face_name, cdb.cursor_size, cdb.full_screen,
cdb.quick_edit, cdb.insert_mode, cdb.auto_position,
cdb.history_buf_size, cdb.history_buf_count, cdb.history_no_dup,
list(cdb.color_table), None
))
# end def from_stream
# end class ConsoleProps
class ConsoleFEProps(ExtraDataBlock):
"""Represents a ConsoleFEProps structure.
.. attribute:: code_page
The code page LCID used to display text.
"""
_fields_ = (
"code_page", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`ConsoleFEProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`ConsoleFEProps`
:returns: The corresponding :class:`ConsoleFEProps` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
# end if
blk = console_fe_data_block.from_buffer_copy(stream.read(12))
return cls((blk.size, blk.sig, LCID.from_ctype(blk.code_page), None))
# end def from_stream
# end class ConsoleFEProps
class DarwinProps(ExtraDataBlock):
"""Represents a DarwinProps structure.
.. attribute:: darwin_data_ansi
An application identifier.
.. attribute:: darwin_data_uni
A unicode version of :attr:`darwin_data_ansi`.
"""
_fields_ = (
"darwin_data_ansi", "darwin_data_uni", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`DarwinProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`DarwinProps`
:returns: The corresponding :class:`DarwinProps` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
# end if
ddb = darwin_data_block.from_buffer_copy(stream.read(788))
darwin_data_ansi = bytes(ddb.darwin_data_ansi)
darwin_data_ansi = darwin_data_ansi.split(b"\x00", 1)[0]
darwin_data_uni = bytes(ddb.darwin_data_uni)
new_darwin_data_uni = _utf16_le_decoder(darwin_data_uni, "ignore")[0]
if new_darwin_data_uni:
darwin_data_uni = new_darwin_data_uni.split("\x00", 1)[0]
# end if
return cls((
ddb.size, ddb.sig, darwin_data_ansi, darwin_data_uni, None
))
# end def from_stream
# end class DarwinProps
class ExpandableStringsDataBlock(ExtraDataBlock):
"""Base class for blocks that use environment variables.
.. attribute:: target_ansi
A path that is constructed with environment variables.
.. attribute:: target_uni
A unicode version of :attr:`target_ansi`
"""
_fields_ = (
"target_ansi", "target_uni", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`ExpandableStringsDataBlock` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`ExpandableStringsDataBlock`
:returns: The corresponding :class:`ExpandableStringsDataBlock` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
# end if
edb = expandable_strings_data_block.from_buffer_copy(stream.read(788))
target_ansi = bytes(edb.target_ansi)
target_ansi = target_ansi.split(b"\x00", 1)[0]
target_uni = bytes(edb.target_uni)
new_target_uni = _utf16_le_decoder(target_uni, "ignore")[0]
if new_target_uni:
target_uni = new_target_uni.split("\x00", 1)[0]
# end if
return cls((edb.size, edb.sig, target_ansi, target_uni, None))
# end def from_stream
# end class ExpandableStringsDataBlock
class EnvironmentProps(ExpandableStringsDataBlock):
"""Path to environment variable information."""
pass
# end class EnvironmentProps
class IconEnvironmentProps(ExpandableStringsDataBlock):
"""Path to an icon encoded with environment variables."""
pass
# end class IconEnvironmentProps
class KnownFolderProps(ExtraDataBlock):
"""Represents a KnownFolderProps structure.
.. attribute:: kf_id
A GUID for the folder.
.. attribute:: offset
The index in the item id list of the known folder.
"""
_fields_ = (
"kf_id", "offset", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`KnownFolderProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`KnownFolderProps`
:returns: The corresponding :class:`KnownFolderProps` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
# end if
kfb = known_folder_data_block.from_buffer_copy(stream.read(28))
kf_id = GUIDToUUID.from_ctype(kfb.kf_id)
return cls((kfb.size, kfb.sig, kf_id, kfb.offset, None))
# end class from_stream
# end class KnownFolderProps
class PropertyStoreProps(ExtraDataBlock):
"""Represents serialized property storage values.
.. attribute:: property_store
A serialized property storage structure (currently not implemented).
"""
_fields_ = (
"property_store", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`PropertyStoreProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`PropertyStoreProps`
:returns: The corresponding :class:`PropertyStoreProps` object.
"""
edb = ExtraDataBlock.from_stream(stream, offset)
return cls((edb.size, edb.sig, edb.data, None))
# end class from_stream
# end class PropertyStoreProps
class ShimProps(ExtraDataBlock):
"""Specifies the name of a shim to use when activating/running the target.
.. attribute:: layer_name
A unicode name of the shim layer.
"""
_fields_ = (
"layer_name", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`ShimProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`ShimProps`
:returns: The corresponding :class:`ShimProps` object.
"""
edb = ExtraDataBlock.from_stream(stream, offset)
layer_name = edb.data
new_layer_name = _utf16_le_decoder(layer_name, "ignore")[0]
if new_layer_name:
layer_name = new_layer_name.split("\x00", 1)[0]
# end if
return cls((edb.size, edb.sig, layer_name, None))
# end def from_stream
# end class ShimProps
class SpecialFolderProps(ExtraDataBlock):
"""Specifies the location of special folders in an item id list.
.. attribute:: sf_id
The special folder identifier.
.. attribute:: offset
The index in the item id list of the special folder.
"""
_fields_ = (
"sf_id", "offset", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`SpecialFolderProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`SpecialFolderProps`
:returns: The corresponding :class:`SpecialFolderProps` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
# end if
sfdb = special_folder_data_block.from_buffer_copy(stream.read(16))
return cls((sfdb.size, sfdb.sig, sfdb.sf_id, sfdb.offset, None))
# end def from_stream
# end class SpecialFolderProps
class DomainRelativeObjId(ActiveStructuple):
"""Represents a domain relative object identifier (DROID).
.. attribute:: volume
The volume field.
.. attribute:: object
The object field.
"""
_fields_ = ("volume", "object")
_takes_stream = True
_takes_ctype = True
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`DomainRelativeObjId` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`DomainRelativeObjId`
:returns: The corresponding :class:`DomainRelativeObjId` object.
"""
if offset is None:
offset = stream.tell()
# end if
volume = GUIDToUUID.from_stream(stream, offset, LITTLE_ENDIAN)
object = GUIDToUUID.from_stream(stream, offset + 16, LITTLE_ENDIAN)
return DomainRelativeObjId((volume, object))
# end def from_stream
@classmethod
def from_ctype(cls, ctype):
"""Creates a :class:`DomainRelativeObjId` object from a ctype.
:type ctype: :class:`lf.win.shell.dtypes.DomainRelativeObjId`
:param ctype: An instance of a DomainRelativeObjId ctype.
:rtype: :class:`DomainRelativeObjId`
:returns: The corresponding :class:`DomainRelativeObjId` object.
"""
return DomainRelativeObjId((
GUIDToUUID.from_ctype(ctype.volume),
GUIDToUUID.from_ctype(ctype.object)
))
# end def from_ctype
# end class DomainRelativeObjId
class TrackerProps(ExtraDataBlock):
""" Data used to resolve a link target with the Link Tracking Service.
.. attribute:: length
The length of the structure (excluding the size and signature).
.. attribute:: version
The version field.
.. attribute:: machine_id
The NetBIOS name of the machine the target was last known to reside on.
.. attribute:: droid
A :class:`DomainRelativeObjId` structure used to find the target.
.. attribute:: droid_birth
A :class:`DomainRelativeObjId` structure used to find the target.
"""
_fields_ = (
"length", "version", "machine_id", "droid", "droid_birth", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`TrackerProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`TrackerProps`
:returns: The corresponding :class:`TrackerProps` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
# end if
tdb = tracker_data_block.from_buffer_copy(stream.read(16))
length = tdb.length
machine_id = stream.read(length - 72).split(b"\x00", 1)[0]
tdbf = tracker_data_block_footer.from_buffer_copy(stream.read(64))
droid = DomainRelativeObjId.from_ctype(tdbf.droid)
droid_birth = DomainRelativeObjId.from_ctype(tdbf.droid_birth)
return cls((
tdb.size, tdb.sig, length, tdb.version, machine_id, droid,
droid_birth, None
))
# end def from_stream
# end class TrackerProps
class VistaAndAboveIDListProps(ExtraDataBlock):
"""An alternative to an item id list.
.. attribute:: idlist
An alternate item id list.
"""
_fields_ = (
"idlist", "data"
)
@classmethod
def from_stream(cls, stream, offset=None):
"""Creates a :class:`VistaAndAboveIDListProps` from a stream.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structure.
:type offset: ``int``
:param offset: The start of the structure in the stream.
:rtype: :class:`VistaAndAboveIDListProps`
:returns: The corresponding :class:`VistaAndAboveIDListProps` object.
"""
if offset is not None:
stream.seek(offset, SEEK_SET)
else:
offset = stream.tell()
# end if
header = data_block_header.from_buffer_copy(stream.read(8))
offset += 8
list_size = header.size - 8
idlist = ITEMIDLIST.from_stream(stream, offset, list_size)
return cls((header.size, header.sig, idlist, None))
# end def from_stream
# end class VistaAndAboveIDListProps
class TerminalBlock(ExtraDataBlock):
"""Represents a Terminal block."""
pass
# end class TerminalBlock
class ExtraDataBlockFactory():
"""Makes :class:`ExtraDataBlock` (and subclass) objects.
.. attribute:: props_map
        A dictionary mapping various signature values to their corresponding
object factories. Used by :meth:`make_blocks`.
"""
props_map = {
CONSOLE_PROPS_SIG: ConsoleProps.from_stream,
CONSOLE_FE_PROPS_SIG: ConsoleFEProps.from_stream,
DARWIN_PROPS_SIG: DarwinProps.from_stream,
ENVIRONMENT_PROPS_SIG: EnvironmentProps.from_stream,
ICON_ENVIRONMENT_PROPS_SIG: IconEnvironmentProps.from_stream,
KNOWN_FOLDER_PROPS_SIG: KnownFolderProps.from_stream,
PROPERTY_STORE_PROPS_SIG: PropertyStoreProps.from_stream,
SHIM_PROPS_SIG: ShimProps.from_stream,
SPECIAL_FOLDER_PROPS_SIG: SpecialFolderProps.from_stream,
TRACKER_PROPS_SIG: TrackerProps.from_stream,
VISTA_AND_ABOVE_IDLIST_PROPS_SIG: VistaAndAboveIDListProps.from_stream
}
@classmethod
def make_blocks(cls, stream, offset=None):
"""reates a series of :class:`ExtraDataBlock` (or subclass) objects.
:type stream: :class:`~lf.dec.IStream`
:param stream: A stream that contains the structures.
:type offset: ``int``
:param offset: The start of the structures in the stream.
:rtype: ``iterator``
:returns: An iterator of the corresponding objects.
"""
if offset is None:
offset = stream.tell()
# end if
props_map = ExtraDataBlockFactory.props_map
stream.seek(offset, SEEK_SET)
data = stream.read(4)
if len(data) != 4:
return
# end if
size = uint32_le.from_buffer_copy(data).value
if size < 4:
return
# end if
data = stream.read(4)
if len(data) != 4:
return
# end if
sig = uint32_le.from_buffer_copy(data).value
if sig == 0:
return
# end if
if sig in props_map:
block = props_map[sig](stream, offset)
else:
            block = ExtraDataBlock.from_stream(stream, offset)
# end if
yield block
offset += size
while block.sig != 0:
stream.seek(offset, SEEK_SET)
data = stream.read(4)
if len(data) != 4:
break
# end if
size = uint32_le.from_buffer_copy(data).value
if size < 4:
break
# end if
data = stream.read(4)
if len(data) != 4:
break
# end if
sig = uint32_le.from_buffer_copy(data).value
if sig in props_map:
block = props_map[sig](stream, offset)
else:
block = ExtraDataBlock.from_stream(stream, offset)
# end if
yield block
offset += size
# end while
# end def make_blocks
# end class ExtraDataBlockFactory
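# Illustrative usage sketch (not part of the original module; RawIStream is assumed to
# be the concrete lf.dec stream class used to read a file from disk):
#
# from lf.dec import RawIStream
# link = ShellLink(RawIStream("target.lnk"))
# print(link.header.mtime, link.string_data.rel_path)
# for block in link.extra_data:
#     print(type(block).__name__, block.size)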
|
gpl-3.0
|
MatthewWilkes/mw4068-packaging
|
src/melange/src/soc/models/mentor.py
|
2
|
1664
|
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Organization Mentor Model."""
__authors__ = [
'"Todd Larsen" <[email protected]>',
'"Sverre Rabbelier" <[email protected]>',
'"Lennard de Rijk" <[email protected]>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext
import soc.models.program
import soc.models.role
class Mentor(soc.models.role.Role):
"""Organization Mentor.
"""
#: A required property that defines the program that this mentor works for
program = db.ReferenceProperty(reference_class=soc.models.program.Program,
required=True, collection_name='mentors')
can_we_contact_you = db.BooleanProperty(verbose_name=ugettext(
'Can we contact you?'))
can_we_contact_you.help_text = ugettext(
'Please check here if you would not mind being contacted by the Program'
' Administrators for follow up with members of the press who would like'
' to interview you about the program.')
can_we_contact_you.group = ugettext("2. Contact Info (Private)")
|
apache-2.0
|
Jonekee/chromium.src
|
third_party/closure_linter/closure_linter/tokenutil.py
|
108
|
19916
|
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token utility functions."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import copy
import StringIO
from closure_linter.common import tokens
from closure_linter.javascripttokens import JavaScriptToken
from closure_linter.javascripttokens import JavaScriptTokenType
# Shorthand
Type = tokens.TokenType
def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token.
Args:
token: Any token in the line.
Returns:
The first token in the same line as token.
"""
while not token.IsFirstInLine():
token = token.previous
return token
def GetFirstTokenInPreviousLine(token):
"""Returns the first token in the previous line as token.
Args:
token: Any token in the line.
Returns:
The first token in the previous line as token, or None if token is on the
first line.
"""
first_in_line = GetFirstTokenInSameLine(token)
if first_in_line.previous:
return GetFirstTokenInSameLine(first_in_line.previous)
return None
def GetLastTokenInSameLine(token):
"""Returns the last token in the same line as token.
Args:
token: Any token in the line.
Returns:
The last token in the same line as token.
"""
while not token.IsLastInLine():
token = token.next
return token
def GetAllTokensInSameLine(token):
"""Returns all tokens in the same line as the given token.
Args:
token: Any token in the line.
Returns:
All tokens on the same line as the given token.
"""
first_token = GetFirstTokenInSameLine(token)
last_token = GetLastTokenInSameLine(token)
tokens_in_line = []
while first_token != last_token:
tokens_in_line.append(first_token)
first_token = first_token.next
tokens_in_line.append(last_token)
return tokens_in_line
def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False):
"""Returns the first token where func is True within distance of this token.
Args:
start_token: The token to start searching from
func: The function to call to test a token for applicability
end_func: The function to call to test a token to determine whether to abort
the search.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token matching func within distance of this token, or None if no
such token is found.
"""
token = start_token
if reverse:
while token and (distance is None or distance > 0):
previous = token.previous
if previous:
if func(previous):
return previous
if end_func and end_func(previous):
return None
token = previous
if distance is not None:
distance -= 1
else:
while token and (distance is None or distance > 0):
next_token = token.next
if next_token:
if func(next_token):
return next_token
if end_func and end_func(next_token):
return None
token = next_token
if distance is not None:
distance -= 1
return None
def Search(start_token, token_types, distance=None, reverse=False):
"""Returns the first token of type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The allowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
None, distance, reverse)
def SearchExcept(start_token, token_types, distance=None, reverse=False):
"""Returns the first token not of any type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The unallowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
"""
return CustomSearch(start_token,
lambda token: not token.IsAnyType(token_types),
None, distance, reverse)
def SearchUntil(start_token, token_types, end_types, distance=None,
reverse=False):
"""Returns the first token of type in token_types before a token of end_type.
Args:
start_token: The token to start searching from.
token_types: The allowable types of the token being searched for.
end_types: Types of tokens to abort search if we find.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token
before any tokens of type in end_type, or None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
lambda token: token.IsAnyType(end_types),
distance, reverse)
def DeleteToken(token):
"""Deletes the given token from the linked list.
Args:
token: The token to delete
"""
# When deleting a token, we do not update the deleted token itself to make
# sure the previous and next pointers are still pointing to tokens which are
# not deleted. Also it is very hard to keep track of all previously deleted
# tokens to update them when their pointers become invalid. So we add this
# flag that any token linked list iteration logic can skip deleted node safely
# when its current token is deleted.
token.is_deleted = True
if token.previous:
token.previous.next = token.next
if token.next:
token.next.previous = token.previous
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
def DeleteTokens(token, token_count):
"""Deletes the given number of tokens starting with the given token.
Args:
token: The token to start deleting at.
token_count: The total number of tokens to delete.
"""
for i in xrange(1, token_count):
DeleteToken(token.next)
DeleteToken(token)
def InsertTokenBefore(new_token, token):
"""Insert new_token before token.
Args:
new_token: A token to be added to the stream
token: A token already in the stream
"""
new_token.next = token
new_token.previous = token.previous
new_token.metadata = copy.copy(token.metadata)
if new_token.IsCode():
old_last_code = token.metadata.last_code
following_token = token
while (following_token and
following_token.metadata.last_code == old_last_code):
following_token.metadata.last_code = new_token
following_token = following_token.next
token.previous = new_token
if new_token.previous:
new_token.previous.next = new_token
if new_token.start_index is None:
if new_token.line_number == token.line_number:
new_token.start_index = token.start_index
else:
previous_token = new_token.previous
if previous_token:
new_token.start_index = (previous_token.start_index +
len(previous_token.string))
else:
new_token.start_index = 0
iterator = new_token.next
while iterator and iterator.line_number == new_token.line_number:
iterator.start_index += len(new_token.string)
iterator = iterator.next
def InsertTokenAfter(new_token, token):
"""Insert new_token after token.
Args:
new_token: A token to be added to the stream
token: A token already in the stream
"""
new_token.previous = token
new_token.next = token.next
new_token.metadata = copy.copy(token.metadata)
if token.IsCode():
new_token.metadata.last_code = token
if new_token.IsCode():
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = new_token
following_token = following_token.next
token.next = new_token
if new_token.next:
new_token.next.previous = new_token
if new_token.start_index is None:
if new_token.line_number == token.line_number:
new_token.start_index = token.start_index + len(token.string)
else:
new_token.start_index = 0
iterator = new_token.next
while iterator and iterator.line_number == new_token.line_number:
iterator.start_index += len(new_token.string)
iterator = iterator.next
def InsertTokensAfter(new_tokens, token):
"""Insert multiple tokens after token.
Args:
new_tokens: An array of tokens to be added to the stream
token: A token already in the stream
"""
# TODO(user): It would be nicer to have InsertTokenAfter defer to here
# instead of vice-versa.
current_token = token
for new_token in new_tokens:
InsertTokenAfter(new_token, current_token)
current_token = new_token
def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token.
Args:
token: The token to insert a space token after
Returns:
A single space token
"""
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number)
InsertTokenAfter(space_token, token)
def InsertBlankLineAfter(token):
"""Inserts a blank line after the given token.
Args:
token: The token to insert a blank line after
Returns:
A single space token
"""
blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1)
InsertLineAfter(token, [blank_token])
def InsertLineAfter(token, new_tokens):
"""Inserts a new line consisting of new_tokens after the given token.
Args:
token: The token to insert after.
new_tokens: The tokens that will make up the new line.
"""
insert_location = token
for new_token in new_tokens:
InsertTokenAfter(new_token, insert_location)
insert_location = new_token
# Update all subsequent line numbers.
next_token = new_tokens[-1].next
while next_token:
next_token.line_number += 1
next_token = next_token.next
def SplitToken(token, position):
"""Splits the token into two tokens at position.
Args:
token: The token to split
position: The position to split at. Will be the beginning of second token.
Returns:
The new second token.
"""
new_string = token.string[position:]
token.string = token.string[:position]
new_token = JavaScriptToken(new_string, token.type, token.line,
token.line_number)
InsertTokenAfter(new_token, token)
return new_token
def Compare(token1, token2):
"""Compares two tokens and determines their relative order.
Args:
token1: The first token to compare.
token2: The second token to compare.
Returns:
A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream.
"""
if token2.line_number != token1.line_number:
return token1.line_number - token2.line_number
else:
return token1.start_index - token2.start_index
def GoogScopeOrNoneFromStartBlock(token):
"""Determines if the given START_BLOCK is part of a goog.scope statement.
Args:
token: A token of type START_BLOCK.
Returns:
The goog.scope function call token, or None if such call doesn't exist.
"""
if token.type != JavaScriptTokenType.START_BLOCK:
return None
# Search for a goog.scope statement, which will be 5 tokens before the
# block. Illustration of the tokens found prior to the start block:
# goog.scope(function() {
# 5 4 3 21 ^
maybe_goog_scope = token
for unused_i in xrange(5):
maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
maybe_goog_scope.previous else None)
if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
return maybe_goog_scope
def GetTokenRange(start_token, end_token):
"""Returns a list of tokens between the two given, inclusive.
Args:
start_token: Start token in the range.
end_token: End token in the range.
Returns:
A list of tokens, in order, from start_token to end_token (including start
and end). Returns none if the tokens do not describe a valid range.
"""
token_range = []
token = start_token
while token:
token_range.append(token)
if token == end_token:
return token_range
token = token.next
def TokensToString(token_iterable):
"""Convert a number of tokens into a string.
Newlines will be inserted whenever the line_number of two neighboring
strings differ.
Args:
token_iterable: The tokens to turn to a string.
Returns:
A string representation of the given tokens.
"""
buf = StringIO.StringIO()
token_list = list(token_iterable)
if not token_list:
return ''
line_number = token_list[0].line_number
for token in token_list:
while line_number < token.line_number:
line_number += 1
buf.write('\n')
if line_number > token.line_number:
line_number = token.line_number
buf.write('\n')
buf.write(token.string)
return buf.getvalue()
def GetPreviousCodeToken(token):
"""Returns the code token before the specified token.
Args:
token: A token.
Returns:
The code token before the specified token or None if no such token
exists.
"""
return CustomSearch(
token,
lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
reverse=True)
def GetNextCodeToken(token):
"""Returns the next code token after the specified token.
Args:
token: A token.
Returns:
The next code token after the specified token or None if no such token
exists.
"""
return CustomSearch(
token,
lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
reverse=False)
def GetIdentifierStart(token):
"""Returns the first token in an identifier.
Given a token which is part of an identifier, returns the token at the start
of the identifier.
Args:
token: A token which is part of an identifier.
Returns:
The token at the start of the identifier or None if the identifier was not
of the form 'a.b.c' (e.g. "['a']['b'].c").
"""
start_token = token
previous_code_token = GetPreviousCodeToken(token)
while (previous_code_token and (
previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
_IsDot(previous_code_token))):
start_token = previous_code_token
previous_code_token = GetPreviousCodeToken(previous_code_token)
if _IsDot(start_token):
return None
return start_token
def GetIdentifierForToken(token):
"""Get the symbol specified by a token.
Given a token, this function additionally concatenates any parts of an
identifying symbol being identified that are split by whitespace or a
newline.
The function will return None if the token is not the first token of an
identifier.
Args:
token: The first token of a symbol.
Returns:
The whole symbol, as a string.
"""
# Search backward to determine if this token is the first token of the
# identifier. If it is not the first token, return None to signal that this
# token should be ignored.
prev_token = token.previous
while prev_token:
if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
_IsDot(prev_token)):
return None
if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
prev_token = prev_token.previous
else:
break
# A "function foo()" declaration.
if token.type is JavaScriptTokenType.FUNCTION_NAME:
return token.string
# A "var foo" declaration (if the previous token is 'var')
previous_code_token = GetPreviousCodeToken(token)
if previous_code_token and previous_code_token.IsKeyword('var'):
return token.string
# Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
# could span multiple lines or be broken up by whitespace. We need
# to concatenate.
identifier_types = set([
JavaScriptTokenType.IDENTIFIER,
JavaScriptTokenType.SIMPLE_LVALUE
])
assert token.type in identifier_types
# Start with the first token
symbol_tokens = [token]
if token.next:
for t in token.next:
last_symbol_token = symbol_tokens[-1]
# An identifier is part of the previous symbol if it has a trailing
# dot.
if t.type in identifier_types:
if last_symbol_token.string.endswith('.'):
symbol_tokens.append(t)
continue
else:
break
# A dot is part of the previous symbol if it does not have a trailing
# dot.
if _IsDot(t):
if not last_symbol_token.string.endswith('.'):
symbol_tokens.append(t)
continue
else:
break
# Skip any whitespace
if t.type in JavaScriptTokenType.NON_CODE_TYPES:
continue
# This is the end of the identifier. Stop iterating.
break
if symbol_tokens:
return ''.join([t.string for t in symbol_tokens])
def GetStringAfterToken(token):
"""Get string after token.
Args:
token: Search will be done after this token.
Returns:
String if found after token else None (empty string will also
return None).
Search until end of string as in case of empty string Type.STRING_TEXT is not
present/found and don't want to return next string.
E.g.
a = '';
b = 'test';
When searching for string after 'a' if search is not limited by end of string
then it will return 'test' which is not desirable as there is a empty string
before that.
This will return None for cases where string is empty or no string found
as in both cases there is no Type.STRING_TEXT.
"""
string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
[JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
if string_token:
return string_token.string
else:
return None
def _IsDot(token):
"""Whether the token represents a "dot" operator (foo.bar)."""
return token.type is tokens.TokenType.NORMAL and token.string == '.'
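A small illustration of how CustomSearch walks the token chain. It uses a stand-in token class instead of the real JavaScriptToken, purely to show the traversal logic:
# Stand-in token carrying only the fields CustomSearch touches.
class _FakeToken(object):
    def __init__(self, string):
        self.string = string
        self.previous = None
        self.next = None

def _link(strings):
    toks = [_FakeToken(s) for s in strings]
    for left, right in zip(toks, toks[1:]):
        left.next, right.previous = right, left
    return toks

toks = _link(['var', ' ', 'foo', '=', '1'])
# Find the first '=' within four tokens after 'var'.
found = CustomSearch(toks[0], lambda t: t.string == '=', distance=4)
assert found is toks[3]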
|
bsd-3-clause
|
michaelni/audacity
|
lib-src/lv2/lv2/plugins/eg04-sampler.lv2/waflib/Tools/glib2.py
|
330
|
8170
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Task,Utils,Options,Errors,Logs
from waflib.TaskGen import taskgen_method,before_method,after_method,feature
@taskgen_method
def add_marshal_file(self,filename,prefix):
if not hasattr(self,'marshal_list'):
self.marshal_list=[]
self.meths.append('process_marshal')
self.marshal_list.append((filename,prefix))
@before_method('process_source')
def process_marshal(self):
for f,prefix in getattr(self,'marshal_list',[]):
node=self.path.find_resource(f)
if not node:
raise Errors.WafError('file not found %r'%f)
h_node=node.change_ext('.h')
c_node=node.change_ext('.c')
task=self.create_task('glib_genmarshal',node,[h_node,c_node])
task.env.GLIB_GENMARSHAL_PREFIX=prefix
self.source=self.to_nodes(getattr(self,'source',[]))
self.source.append(c_node)
class glib_genmarshal(Task.Task):
def run(self):
bld=self.inputs[0].__class__.ctx
get=self.env.get_flat
cmd1="%s %s --prefix=%s --header > %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[0].abspath())
ret=bld.exec_command(cmd1)
if ret:return ret
c='''#include "%s"\n'''%self.outputs[0].name
self.outputs[1].write(c)
cmd2="%s %s --prefix=%s --body >> %s"%(get('GLIB_GENMARSHAL'),self.inputs[0].srcpath(),get('GLIB_GENMARSHAL_PREFIX'),self.outputs[1].abspath())
return bld.exec_command(cmd2)
vars=['GLIB_GENMARSHAL_PREFIX','GLIB_GENMARSHAL']
color='BLUE'
ext_out=['.h']
@taskgen_method
def add_enums_from_template(self,source='',target='',template='',comments=''):
if not hasattr(self,'enums_list'):
self.enums_list=[]
self.meths.append('process_enums')
self.enums_list.append({'source':source,'target':target,'template':template,'file-head':'','file-prod':'','file-tail':'','enum-prod':'','value-head':'','value-prod':'','value-tail':'','comments':comments})
@taskgen_method
def add_enums(self,source='',target='',file_head='',file_prod='',file_tail='',enum_prod='',value_head='',value_prod='',value_tail='',comments=''):
if not hasattr(self,'enums_list'):
self.enums_list=[]
self.meths.append('process_enums')
self.enums_list.append({'source':source,'template':'','target':target,'file-head':file_head,'file-prod':file_prod,'file-tail':file_tail,'enum-prod':enum_prod,'value-head':value_head,'value-prod':value_prod,'value-tail':value_tail,'comments':comments})
@before_method('process_source')
def process_enums(self):
for enum in getattr(self,'enums_list',[]):
task=self.create_task('glib_mkenums')
env=task.env
inputs=[]
source_list=self.to_list(enum['source'])
if not source_list:
raise Errors.WafError('missing source '+str(enum))
source_list=[self.path.find_resource(k)for k in source_list]
inputs+=source_list
env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list]
if not enum['target']:
raise Errors.WafError('missing target '+str(enum))
tgt_node=self.path.find_or_declare(enum['target'])
if tgt_node.name.endswith('.c'):
self.source.append(tgt_node)
env['GLIB_MKENUMS_TARGET']=tgt_node.abspath()
options=[]
if enum['template']:
template_node=self.path.find_resource(enum['template'])
options.append('--template %s'%(template_node.abspath()))
inputs.append(template_node)
params={'file-head':'--fhead','file-prod':'--fprod','file-tail':'--ftail','enum-prod':'--eprod','value-head':'--vhead','value-prod':'--vprod','value-tail':'--vtail','comments':'--comments'}
for param,option in params.items():
if enum[param]:
options.append('%s %r'%(option,enum[param]))
env['GLIB_MKENUMS_OPTIONS']=' '.join(options)
task.set_inputs(inputs)
task.set_outputs(tgt_node)
class glib_mkenums(Task.Task):
run_str='${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}'
color='PINK'
ext_out=['.h']
@taskgen_method
def add_settings_schemas(self,filename_list):
if not hasattr(self,'settings_schema_files'):
self.settings_schema_files=[]
if not isinstance(filename_list,list):
filename_list=[filename_list]
self.settings_schema_files.extend(filename_list)
@taskgen_method
def add_settings_enums(self,namespace,filename_list):
if hasattr(self,'settings_enum_namespace'):
raise Errors.WafError("Tried to add gsettings enums to '%s' more than once"%self.name)
self.settings_enum_namespace=namespace
if not isinstance(filename_list,list):
filename_list=[filename_list]
self.settings_enum_files=filename_list
def r_change_ext(self,ext):
name=self.name
k=name.rfind('.')
if k>=0:
name=name[:k]+ext
else:
name=name+ext
return self.parent.find_or_declare([name])
@feature('glib2')
def process_settings(self):
enums_tgt_node=[]
install_files=[]
settings_schema_files=getattr(self,'settings_schema_files',[])
if settings_schema_files and not self.env['GLIB_COMPILE_SCHEMAS']:
raise Errors.WafError("Unable to process GSettings schemas - glib-compile-schemas was not found during configure")
if hasattr(self,'settings_enum_files'):
enums_task=self.create_task('glib_mkenums')
source_list=self.settings_enum_files
source_list=[self.path.find_resource(k)for k in source_list]
enums_task.set_inputs(source_list)
enums_task.env['GLIB_MKENUMS_SOURCE']=[k.abspath()for k in source_list]
target=self.settings_enum_namespace+'.enums.xml'
tgt_node=self.path.find_or_declare(target)
enums_task.set_outputs(tgt_node)
enums_task.env['GLIB_MKENUMS_TARGET']=tgt_node.abspath()
enums_tgt_node=[tgt_node]
install_files.append(tgt_node)
options='--comments "<!-- @comment@ -->" --fhead "<schemalist>" --vhead " <@type@ id=\\"%s.@EnumName@\\">" --vprod " <value nick=\\"@valuenick@\\" value=\\"@valuenum@\\"/>" --vtail " </@type@>" --ftail "</schemalist>" '%(self.settings_enum_namespace)
enums_task.env['GLIB_MKENUMS_OPTIONS']=options
for schema in settings_schema_files:
schema_task=self.create_task('glib_validate_schema')
schema_node=self.path.find_resource(schema)
if not schema_node:
raise Errors.WafError("Cannot find the schema file '%s'"%schema)
install_files.append(schema_node)
source_list=enums_tgt_node+[schema_node]
schema_task.set_inputs(source_list)
schema_task.env['GLIB_COMPILE_SCHEMAS_OPTIONS']=[("--schema-file="+k.abspath())for k in source_list]
target_node=r_change_ext(schema_node,'.xml.valid')
schema_task.set_outputs(target_node)
schema_task.env['GLIB_VALIDATE_SCHEMA_OUTPUT']=target_node.abspath()
def compile_schemas_callback(bld):
if not bld.is_install:return
Logs.pprint('YELLOW','Updating GSettings schema cache')
command=Utils.subst_vars("${GLIB_COMPILE_SCHEMAS} ${GSETTINGSSCHEMADIR}",bld.env)
ret=self.bld.exec_command(command)
if self.bld.is_install:
if not self.env['GSETTINGSSCHEMADIR']:
raise Errors.WafError('GSETTINGSSCHEMADIR not defined (should have been set up automatically during configure)')
if install_files:
self.bld.install_files(self.env['GSETTINGSSCHEMADIR'],install_files)
if not hasattr(self.bld,'_compile_schemas_registered'):
self.bld.add_post_fun(compile_schemas_callback)
self.bld._compile_schemas_registered=True
class glib_validate_schema(Task.Task):
run_str='rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}'
color='PINK'
def configure(conf):
conf.find_program('glib-genmarshal',var='GLIB_GENMARSHAL')
conf.find_perl_program('glib-mkenums',var='GLIB_MKENUMS')
conf.find_program('glib-compile-schemas',var='GLIB_COMPILE_SCHEMAS',mandatory=False)
def getstr(varname):
return getattr(Options.options,varname,getattr(conf.env,varname,''))
gsettingsschemadir=getstr('GSETTINGSSCHEMADIR')
if not gsettingsschemadir:
datadir=getstr('DATADIR')
if not datadir:
prefix=conf.env['PREFIX']
datadir=os.path.join(prefix,'share')
gsettingsschemadir=os.path.join(datadir,'glib-2.0','schemas')
conf.env['GSETTINGSSCHEMADIR']=gsettingsschemadir
def options(opt):
opt.add_option('--gsettingsschemadir',help='GSettings schema location [Default: ${datadir}/glib-2.0/schemas]',default='',dest='GSETTINGSSCHEMADIR')
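A hypothetical wscript fragment showing how the task generator methods above are usually wired up. The file names, target name, and marshal prefix are made up for illustration:
# Hypothetical wscript using the glib2 tool.
def options(opt):
    opt.load('glib2')

def configure(conf):
    conf.load('compiler_c glib2')

def build(bld):
    tg = bld(features='c cprogram glib2',
             source='main.c',
             target='demo',
             settings_schema_files=['org.example.demo.gschema.xml'])
    # Generate marshal and enum sources before compilation.
    tg.add_marshal_file('marshal.list', 'demo_marshal')
    tg.add_enums_from_template(source='demo-types.h',
                               target='demo-enums.c',
                               template='demo-enums.c.template')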
|
gpl-2.0
|
amdw/dotsandboxes
|
fig_3by3samplegame.py
|
1
|
1966
|
#!/usr/bin/env python3
# Copyright 2016 Andrew Medworth ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Generate SVG diagram for sample game played according
to Berlekamp's winning 3x3 strategy
"""
import svg
def main():
"""Entry point"""
layout = svg.Layout(grid_width=4, grid_margin=100)
pos = svg.DotsAndBoxesPosition(3, 3, layout=layout)
pos.move_highlight_and_add(1, 2, "bottom")
pos.move_highlight_and_add(1, 2, "left")
pos.move_highlight_and_add(1, 1, "top")
pos.move_highlight_and_add(0, 1, "top")
pos.move_highlight_and_add(2, 1, "top")
pos.move_highlight_and_add(1, 1, "right")
pos.move_highlight_and_add(1, 0, "top")
pos.move_highlight_and_add(0, 1, "left")
pos.move_highlight_and_add(0, 0, "top")
pos.move_highlight_and_add(0, 0, "right")
pos.move_highlight_and_add(0, 0, "left")
pos.move_highlight_and_add(1, 0, "right")
pos.move_highlight_and_add(0, 2, "left")
pos.move_highlight_and_add(2, 0, "top")
pos.move_highlight_and_add(2, 0, "right")
pos.move_highlight_and_add(2, 2, "bottom")
pos.move_highlight_and_add(2, 2, "right")
pos.move_highlight_and_add(0, 2, "bottom")
pos.move_highlight_and_add(0, 2, "top")
pos.move_highlight_and_add(0, 1, "right")
layout.render()
if __name__ == '__main__':
main()
|
agpl-3.0
|
KonishchevDmitry/vkfeed
|
vkfeed/tools/html_parser.py
|
16
|
8734
|
'''A convenient class for parsing HTML pages.'''
from __future__ import unicode_literals
from HTMLParser import HTMLParser
import logging
import re
from vkfeed.core import Error
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
class HTMLPageParser(HTMLParser):
'''A convenient class for parsing HTML pages.'''
tag_name_regex = '[a-zA-Z][-.a-zA-Z0-9:_]*'
'''A regular expression for tag name.'''
attribute_name_regex = tag_name_regex
'''A regular expression for attribute name.'''
tag_attrs_regex = re.sub(r'\s*', '', r'''
(?:\s+
''' + attribute_name_regex + r'''
(?:\s*=\s*
(?:
'[^']*'
|"[^"]*"
|[^'"/>\s]+
)
)?
)*
''')
'''A regular expression for tag attributes.'''
script_regex = re.compile('<script' + tag_attrs_regex + '>.*?</script>', re.DOTALL | re.IGNORECASE)
'''A regular expression for matching scripts.'''
__invalid_tag_attr_spacing_regex = re.compile(r'''
(
# Tag name
<''' + tag_name_regex + r'''
# Zero or several attributes
''' + tag_attrs_regex + r'''
# Two attributes without a space between them
\s+ # whitespace before attribute name
''' + attribute_name_regex + r''' # attribute name
\s*=\s* # value indicator
(?:
'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
)
)
([^\s>]) # Do not include / to make the preparation replacement for __invalid_tag_attr_regex
''', re.VERBOSE)
'''
A regular expression for matching a common error in specifying tag
attributes.
'''
__invalid_tag_attr_regex = re.compile(r'''
(
# Tag name
<''' + tag_name_regex + r'''
# Zero or several attributes
''' + tag_attrs_regex + r'''
)
\s+(?:
# Invalid characters instead of an attribute
[^\sa-zA-Z/>]\S*
|
# Sole slash
/\s
|
# Invalid characters starting from slash instead of an attribute
/[^>\s]+
)
''', re.VERBOSE)
'''
A regular expression for matching HTML errors like:
<a class="app photo"/app2322149_58238998?from_id=2381857&loc=addneighbour onclick="return cur.needLoginBox()">
'''
__empty_tags = 'area|base|basefont|br|col|frame|hr|img|input|link|meta|param'
'''A list of all HTML empty tags.'''
__misopened_tag_regex = re.compile(r'<(' + __empty_tags + tag_attrs_regex + r')\s*>', re.IGNORECASE)
'''A regular expression for matching opened tags that should be closed.'''
__tag_stack = None
'''A stack of currently opened HTML tags.'''
__cur_data = None
'''
Accumulates data between handle_charref(), handle_entityref() and
handle_data() calls.
'''
def __init__(self):
HTMLParser.__init__(self)
def handle_charref(self, name):
'''Handles a character reference of the form &#ref;.'''
self.__accumulate_data('&#' + name + ';')
def handle_data(self, data):
'''Handles data.'''
self.__accumulate_data(data)
def handle_endtag(self, tag_name):
'''Handles end of a tag.'''
self.__handle_data_if_exists()
if self.__get_cur_tag()['name'] == tag_name:
self.__close_tag(self.__tag_stack.pop())
else:
for tag_id in xrange(len(self.__tag_stack) - 1, -1, -1):
if self.__tag_stack[tag_id]['name'] == tag_name:
for tag in reversed(self.__tag_stack[tag_id + 1:]):
self.__close_tag(tag, forced = True)
self.__tag_stack.pop()
self.__close_tag(self.__tag_stack.pop())
break
else:
LOG.debug('Dropping excess end tag "%s"...', tag_name)
def handle_entityref(self, name):
'''Handles a general entity reference of the form &name;.'''
self.__accumulate_data('&' + name + ';')
def handle_root_data(self, tag, data):
'''Handles data inside of the root of the document.'''
LOG.debug('%s', data)
def handle_root(self, tag, attrs, empty):
'''Handles a tag inside of the root of the document.'''
LOG.debug('<%s %s%s>', tag['name'], attrs, '/' if empty else '')
tag['new_tag_handler'] = self.handle_root
tag['data_handler'] = self.handle_root_data
tag['end_tag_handler'] = self.handle_root_end
def handle_root_end(self, tag):
'''Handles end of the root of the document.'''
LOG.debug('</%s>', tag['name'])
def handle_startendtag(self, tag, attrs):
'''Handles start of an XHTML-style empty tag.'''
self.__handle_data_if_exists()
self.__handle_start_tag(tag, attrs, True)
def handle_starttag(self, tag, attrs):
'''Handles start of a tag.'''
self.__handle_data_if_exists()
self.__handle_start_tag(tag, attrs, False)
def reset(self):
'''Resets the parser.'''
HTMLParser.reset(self)
self.__tag_stack = [{
# Add fake root tag
'name': None,
'new_tag_handler': self.handle_root,
'data_handler': self.handle_root_data,
'end_tag_handler': self.handle_root_end,
}]
def parse(self, html):
'''Parses the specified HTML page.'''
html = self.__fix_html(html)
self.reset()
try:
# Run the parser
self.feed(html)
self.close()
finally:
# Close all unclosed tags
for tag in self.__tag_stack[1:]:
self.__close_tag(tag, True)
def __accumulate_data(self, data):
'''
Accumulates data between handle_charref(), handle_entityref() and
handle_data() calls.
'''
if self.__cur_data is None:
self.__cur_data = data
else:
self.__cur_data += data
def __close_tag(self, tag, forced = False):
'''Forces closing of an unclosed tag.'''
if forced:
LOG.debug('Force closing of unclosed tag "%s".', tag['name'])
else:
LOG.debug('Tag %s closed.', tag)
if 'end_tag_handler' in tag:
tag['end_tag_handler'](tag)
LOG.debug('Current tag: %s.', self.__get_cur_tag())
def __fix_html(self, html):
'''Fixes various things that may confuse the Python's HTML parser.'''
html = self.script_regex.sub('', html)
loop_replacements = (
lambda html: self.__invalid_tag_attr_spacing_regex.subn(r'\1 \2', html),
lambda html: self.__invalid_tag_attr_regex.subn(r'\1 ', html),
)
for loop_replacement in loop_replacements:
for i in xrange(0, 1000):
html, changed = loop_replacement(html)
if not changed:
break
else:
raise Error('Too many errors in the HTML or infinite loop.')
html = self.__misopened_tag_regex.sub(r'<\1 />', html)
return html
def __get_cur_tag(self):
'''Returns currently opened tag.'''
return self.__tag_stack[-1]
def __handle_data_if_exists(self):
'''Handles accumulated data (if exists).'''
data = self.__cur_data
if data is None:
return
self.__cur_data = None
tag = self.__get_cur_tag()
handler = tag.get('data_handler')
if handler is not None:
LOG.debug('Data "%s" in "%s" with handler %s.',
data, tag['name'], handler.func_name)
handler(tag, data)
def __handle_start_tag(self, tag_name, attrs, empty):
'''Handles start of any tag.'''
tag = { 'name': tag_name }
handler = self.__get_cur_tag().get('new_tag_handler')
if handler is not None:
attrs = self.__parse_attrs(attrs)
LOG.debug('Start tag: %s %s with handler %s.',
tag, attrs, handler.func_name)
handler(tag, attrs, empty)
if not empty:
self.__tag_stack.append(tag)
def __parse_attrs(self, attrs_tuple):
'''Converts tag attributes from a tuple to a dictionary.'''
attrs = {}
for attr, value in attrs_tuple:
attrs[attr.lower()] = value
return attrs
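A minimal subclass sketch (not part of the original package) showing how the handler chain works: each tag handler installs new_tag_handler and data_handler entries on the tag dictionary it receives.
# Hypothetical subclass that collects the text inside every <a> tag.
class LinkTextParser(HTMLPageParser):
    def __init__(self):
        HTMLPageParser.__init__(self)
        self.links = []

    def handle_root(self, tag, attrs, empty):
        # Keep descending into every tag; capture data inside <a> tags.
        tag['new_tag_handler'] = self.handle_root
        if tag['name'] == 'a':
            tag['data_handler'] = self.handle_link_data

    def handle_link_data(self, tag, data):
        self.links.append(data)

parser = LinkTextParser()
parser.parse('<html><body><a href="/x">first</a><a href="/y">second</a></body></html>')
print(parser.links)  # -> ['first', 'second']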
|
bsd-2-clause
|
sabi0/intellij-community
|
python/lib/Lib/csv.py
|
87
|
15210
|
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "excel", "excel_tab", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe an Excel dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError, e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
def next(self):
row = self.reader.next()
if self.fieldnames is None:
self.fieldnames = row
row = self.reader.next()
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = self.reader.next()
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError, \
("extrasaction (%s) must be 'raise' or 'ignore'" %
extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
for k in rowdict.keys():
if k not in self.fieldnames:
raise ValueError, "dict contains fields not in fieldnames"
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
self.preferred = [',', '\t', ';', ' ', ':']
def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
"""
quotechar, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
delimiters)
if not delimiter:
raise Error, "Could not determine delimiter"
class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
# escapechar = ''
doublequote = False
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
dialect.skipinitialspace = skipinitialspace
return dialect
def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
"""
matches = []
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
break
if not matches:
return ('', None, 0) # (quotechar, delimiter, skipinitialspace)
quotes = {}
delims = {}
spaces = 0
for m in matches:
n = regexp.groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = regexp.groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = regexp.groupindex['space'] - 1
except KeyError:
continue
if m[n]:
spaces += 1
quotechar = reduce(lambda a, b, quotes = quotes:
(quotes[a] > quotes[b]) and a or b, quotes.keys())
if delims:
delim = reduce(lambda a, b, delims = delims:
(delims[a] > delims[b]) and a or b, delims.keys())
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
skipinitialspace = 0
return (quotechar, delim, skipinitialspace)
def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
"""
data = filter(None, data.split('\n'))
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
# build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
charFrequency[char] = metaFrequency
for char in charFrequency.keys():
items = charFrequency[char].items()
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
items)
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- reduce(lambda a, b: (0, a[1] + b[1]),
items)[1])
else:
modes[char] = items[0]
# build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
consistency -= 0.01
if len(delims) == 1:
delim = delims.keys()[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
# analyze another chunkLength lines
start = end
end += chunkLength
if not delims:
return ('', 0)
# if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
return (d, skipinitialspace)
# nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
delim = items[-1][1]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
return (delim, skipinitialspace)
def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
# subtracting from the likelihood of the first row being a header.
rdr = reader(StringIO(sample), self.sniff(sample))
header = rdr.next() # assume first row is header
columns = len(header)
columnTypes = {}
for i in range(columns): columnTypes[i] = None
checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
checked += 1
if len(row) != columns:
continue # skip rows that have irregular number of columns
for col in columnTypes.keys():
for thisType in [int, long, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
thisType = len(row[col])
# treat longs as ints
if thisType == long:
thisType = int
if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
del columnTypes[col]
# finally, compare results against first row and "vote"
# on whether it's a header
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
hasHeader -= 1
return hasHeader > 0
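A short usage sketch for the Sniffer above; the sample data is made up, and the same pattern works against the standard library csv module:
# Hypothetical usage: detect the dialect of delimiter-separated text.
sample = "name;age;city\r\nalice;30;Berlin\r\nbob;25;Paris\r\n"
sniffer = Sniffer()
dialect = sniffer.sniff(sample)
print(dialect.delimiter)            # -> ';'
print(sniffer.has_header(sample))   # -> True, first row looks like labels
for row in reader(StringIO(sample), dialect):
    print(row)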
|
apache-2.0
|
vinegret/youtube-dl
|
youtube_dl/extractor/nzz.py
|
20
|
1409
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
)
class NZZIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?nzz\.ch/(?:[^/]+/)*[^/?#]+-ld\.(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.nzz.ch/zuerich/gymizyte/gymizyte-schreiben-schueler-heute-noch-diktate-ld.9153',
'info_dict': {
'id': '9153',
},
'playlist_mincount': 6,
}, {
'url': 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112',
'info_dict': {
'id': '1368112',
},
'playlist_count': 1,
}]
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
entries = []
for player_element in re.findall(
r'(<[^>]+class="kalturaPlayer[^"]*"[^>]*>)', webpage):
player_params = extract_attributes(player_element)
if player_params.get('data-type') not in ('kaltura_singleArticle',):
self.report_warning('Unsupported player type')
continue
entry_id = player_params['data-id']
entries.append(self.url_result(
'kaltura:1750922:' + entry_id, 'Kaltura', entry_id))
return self.playlist_result(entries, page_id)
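A quick standalone check of how the _VALID_URL pattern above yields the page id used for the playlist (regex only, without running the full extractor):
# Illustration only: extract the page id the same way _match_id would.
import re

url = 'https://www.nzz.ch/video/nzz-standpunkte/cvp-auf-der-suche-nach-dem-mass-der-mitte-ld.1368112'
mobj = re.match(NZZIE._VALID_URL, url)
print(mobj.group('id'))  # -> '1368112'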
|
unlicense
|
kaarolch/ansible
|
lib/ansible/playbook/become.py
|
63
|
4030
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Become:
# Privilege escalation
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_flags = FieldAttribute(isa='string')
def __init__(self):
return super(Become, self).__init__()
def _detect_privilege_escalation_conflict(self, ds):
# Fail out if user specifies conflicting privilege escalations
has_become = 'become' in ds or 'become_user'in ds
has_sudo = 'sudo' in ds or 'sudo_user' in ds
has_su = 'su' in ds or 'su_user' in ds
if has_become:
msg = 'The become params ("become", "become_user") and'
if has_sudo:
raise AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg)
elif has_su:
raise AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg)
elif has_sudo and has_su:
raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
def _preprocess_data_become(self, ds):
"""Preprocess the playbook data for become attributes
This is called from the Base object's preprocess_data() method which
in turn is called pretty much anytime any sort of playbook object
(plays, tasks, blocks, etc) is created.
"""
self._detect_privilege_escalation_conflict(ds)
# Privilege escalation, backwards compatibility for sudo/su
if 'sudo' in ds or 'sudo_user' in ds:
ds['become_method'] = 'sudo'
if 'sudo' in ds:
ds['become'] = ds['sudo']
del ds['sudo']
if 'sudo_user' in ds:
ds['become_user'] = ds['sudo_user']
del ds['sudo_user']
display.deprecated("Instead of sudo/sudo_user, use become/become_user and make sure become_method is 'sudo' (default)")
elif 'su' in ds or 'su_user' in ds:
ds['become_method'] = 'su'
if 'su' in ds:
ds['become'] = ds['su']
del ds['su']
if 'su_user' in ds:
ds['become_user'] = ds['su_user']
del ds['su_user']
display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
return ds
def set_become_defaults(self, become, become_method, become_user):
''' if we are becoming someone else, but some fields are unset,
make sure they're initialized to the default config values '''
if become:
if become_method is None:
become_method = C.DEFAULT_BECOME_METHOD
if become_user is None:
become_user = C.DEFAULT_BECOME_USER
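A simplified, standalone restatement of the sudo/su to become mapping performed by _preprocess_data_become (illustration only; the real mixin also routes deprecation warnings through Ansible's display object):
# Hypothetical helper mirroring the backwards-compatibility mapping.
def demo_preprocess(ds):
    ds = dict(ds)
    if 'sudo' in ds or 'sudo_user' in ds:
        ds['become_method'] = 'sudo'
        if 'sudo' in ds:
            ds['become'] = ds.pop('sudo')
        if 'sudo_user' in ds:
            ds['become_user'] = ds.pop('sudo_user')
    elif 'su' in ds or 'su_user' in ds:
        ds['become_method'] = 'su'
        if 'su' in ds:
            ds['become'] = ds.pop('su')
        if 'su_user' in ds:
            ds['become_user'] = ds.pop('su_user')
    return ds

print(demo_preprocess({'sudo': True, 'sudo_user': 'root'}))
# -> {'become_method': 'sudo', 'become': True, 'become_user': 'root'}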
|
gpl-3.0
|
rcharp/toyota-flask
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/constants.py
|
3008
|
1335
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
|
apache-2.0
|
cchurch/ansible
|
test/units/modules/net_tools/nios/test_nios_nsgroup.py
|
52
|
4511
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_nsgroup
from ansible.module_utils.net_tools.nios import api
from units.compat.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosNSGroupModule(TestNiosModule):
module = nios_nsgroup
def setUp(self):
super(TestNiosNSGroupModule, self).setUp()
self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_nsgroup.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_nsgroup.WapiModule')
self.exec_command = self.mock_wapi.start()
self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_nsgroup.WapiModule.run')
self.mock_wapi_run.start()
self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosNSGroupModule, self).tearDown()
self.mock_wapi.stop()
def _get_wapi(self, test_object):
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_nios_nsgroup_create(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'my-simple-group',
'comment': None, 'grid_primary': None}
test_object = None
test_spec = {
"name": {"ib_req": True},
"comment": {},
"grid_primary": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__()})
def test_nios_nsgroup_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'name': 'my-simple-group',
'comment': None, 'grid_primary': None}
ref = "nsgroup/ZG5zLm5ldHdvcmtfdmlldyQw:ansible/false"
test_object = [{
"comment": "test comment",
"_ref": ref,
"name": "my-simple-group",
"grid_primary": {'name': 'infoblox-test.example.com'}
}]
test_spec = {
"name": {"ib_req": True},
"comment": {},
"grid_primary": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
def test_nios_nsgroup_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'default',
'comment': 'updated comment', 'grid_primary': None}
test_object = [
{
"comment": "test comment",
"_ref": "nsgroup/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "default",
"grid_primary": {}
}
]
test_spec = {
"name": {"ib_req": True},
"comment": {},
"grid_primary": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.update_object.called_once_with(test_object)
|
gpl-3.0
|
SUSE/kiwi
|
kiwi/privileges.py
|
1
|
1256
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import os
# project
from .exceptions import (
KiwiPrivilegesError
)
class Privileges:
"""
**Implements check for root privileges**
"""
@staticmethod
def check_for_root_permissions():
"""
        Check if we are effectively root on the system. If not,
        an exception is raised
:return: True or raise an Exception
:rtype: bool
"""
if os.geteuid() != 0:
raise KiwiPrivilegesError(
'operation requires root permissions'
)
return True
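

if __name__ == '__main__':
    # Hedged usage sketch, not part of the upstream module: running this file
    # directly just exercises the check and raises KiwiPrivilegesError unless
    # the effective user is root.
    Privileges.check_for_root_permissions()
    print('running with root permissions')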
|
gpl-3.0
|
Endika/python-digitalocean
|
digitalocean/SSHKey.py
|
11
|
2610
|
# -*- coding: utf-8 -*-
from .baseapi import BaseAPI, GET, POST, DELETE, PUT
class SSHKey(BaseAPI):
def __init__(self, *args, **kwargs):
self.id = ""
self.name = None
self.public_key = None
self.fingerprint = None
super(SSHKey, self).__init__(*args, **kwargs)
@classmethod
def get_object(cls, api_token, ssh_key_id):
"""
Class method that will return a SSHKey object by ID.
"""
ssh_key = cls(token=api_token, id=ssh_key_id)
ssh_key.load()
return ssh_key
def load(self):
"""
Load the SSHKey object from DigitalOcean.
Requires either self.id or self.fingerprint to be set.
"""
identifier = None
        if self.id:
identifier = self.id
elif self.fingerprint is not None:
identifier = self.fingerprint
data = self.get_data("account/keys/%s" % identifier, type=GET)
ssh_key = data['ssh_key']
# Setting the attribute values
for attr in ssh_key.keys():
setattr(self, attr, ssh_key[attr])
self.id = ssh_key['id']
def load_by_pub_key(self, public_key):
"""
        Load an SSHKey object from DigitalOcean by its public_key.
        This avoids problems like uploading the same public_key twice.
"""
data = self.get_data("account/keys/")
for jsoned in data['ssh_keys']:
if jsoned.get('public_key', "") == public_key:
self.id = jsoned['id']
self.load()
return self
return None
def create(self):
"""
Create the SSH Key
"""
input_params = {
"name": self.name,
"public_key": self.public_key,
}
data = self.get_data("account/keys/", type=POST, params=input_params)
if data:
self.id = data['ssh_key']['id']
def edit(self):
"""
Edit the SSH Key
"""
input_params = {
"name": self.name,
"public_key": self.public_key,
}
data = self.get_data(
"account/keys/%s" % self.id,
type=PUT,
params=input_params
)
if data:
self.id = data['ssh_key']['id']
def destroy(self):
"""
Destroy the SSH Key
"""
return self.get_data("account/keys/%s" % self.id, type=DELETE)
def __str__(self):
return "%s %s" % (self.id, self.name)
|
lgpl-3.0
|
NeCTAR-RC/nova
|
nova/tests/functional/api_sample_tests/test_limits.py
|
9
|
1785
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class LimitsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = "limits"
def setUp(self):
super(LimitsSampleJsonTest, self).setUp()
# NOTE(gmann): We have to separate the template files between V2
        # and V2.1 as the responses are different.
self.template = 'limit-get-resp'
if self._legacy_v2_code:
self.template = 'v2-limit-get-resp'
def _get_flags(self):
f = super(LimitsSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append("nova.api.openstack.compute."
"legacy_v2.contrib.server_group_quotas."
"Server_group_quotas")
return f
def test_limits_get(self):
response = self._do_get('limits')
self._verify_response(self.template, {}, response, 200)
|
apache-2.0
|
quantifiedcode-bot/invenio-search
|
invenio_search/errors.py
|
20
|
1734
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012,
# 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
class InvenioWebSearchUnknownCollectionError(Exception):
"""Exception for bad collection."""
def __init__(self, colname):
"""Initialisation."""
self.colname = colname
def __str__(self):
"""String representation."""
return repr(self.colname)
class InvenioWebSearchWildcardLimitError(Exception):
"""Exception raised when query limit reached."""
def __init__(self, res):
"""Initialization."""
self.res = res
class InvenioWebSearchReferstoLimitError(Exception):
"""Raise when CFG_WEBSEARCH_MAX_RECORDS_REFERSTO limit is reached."""
def __init__(self, res):
"""Initialization."""
self.res = res
class InvenioWebSearchCitedbyLimitError(Exception):
"""Raise when CFG_WEBSEARCH_MAX_RECORDS_CITEDBY limit is reached."""
def __init__(self, res):
"""Initialization."""
self.res = res
|
gpl-2.0
|
benfinke/ns_python
|
build/scripts-2.7/get_config.py
|
3
|
57309
|
#!/usr/bin/env python
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver import lbvserver
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding import lbvserver_service_binding
from nssrc.com.citrix.netscaler.nitro.resource.stat.lb.lbvserver_stats import lbvserver_stats
from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.util.filtervalue import filtervalue
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver import lbvserver
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_cachepolicy_binding import lbvserver_cachepolicy_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding import lbvserver_service_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.appfw.appfwconfidfield import appfwconfidfield
from nssrc.com.citrix.netscaler.nitro.resource.config.appfw.appfwlearningdata import appfwlearningdata
from nssrc.com.citrix.netscaler.nitro.resource.config.appfw.appfwlearningdata_args import appfwlearningdata_args
from nssrc.com.citrix.netscaler.nitro.resource.config.appfw.appfwprofile import appfwprofile
from nssrc.com.citrix.netscaler.nitro.resource.config.audit.auditnslogaction import auditnslogaction
from nssrc.com.citrix.netscaler.nitro.resource.config.audit.auditsyslogparams import auditsyslogparams
from nssrc.com.citrix.netscaler.nitro.resource.config.authorization.authorizationpolicylabel_binding import authorizationpolicylabel_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service import service
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_binding import service_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.service_lbmonitor_binding import service_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.cache.cacheobject import cacheobject
from nssrc.com.citrix.netscaler.nitro.resource.config.cmp.cmppolicy_lbvserver_binding import cmppolicy_lbvserver_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.dns.dnsnsecrec import dnsnsecrec
from nssrc.com.citrix.netscaler.nitro.resource.config.dns.dnssuffix import dnssuffix
from nssrc.com.citrix.netscaler.nitro.resource.config.dns.dnsview_dnspolicy_binding import dnsview_dnspolicy_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.dns.dnszone import dnszone
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbldnsentries import gslbldnsentries
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbparameter import gslbparameter
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice import gslbservice
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_binding import gslbservice_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbsite import gslbsite
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver import gslbvserver
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbvserver_gslbservice_binding import gslbvserver_gslbservice_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.ha.hanode import hanode
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_binding import lbvserver_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.network.Interface import Interface
from nssrc.com.citrix.netscaler.nitro.resource.config.network.channel import channel
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsacl import nsacl
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsip import nsip
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsip_args import nsip_args
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nslimitidentifier import nslimitidentifier
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nstcpbufparam import nstcpbufparam
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsversion import nsversion
from nssrc.com.citrix.netscaler.nitro.resource.config.ns.nsxmlnamespace import nsxmlnamespace
from nssrc.com.citrix.netscaler.nitro.resource.config.policy.policyexpression import policyexpression
from nssrc.com.citrix.netscaler.nitro.resource.config.policy.policyexpression_args import policyexpression_args
from nssrc.com.citrix.netscaler.nitro.resource.config.protocol.protocolhttpband import protocolhttpband
from nssrc.com.citrix.netscaler.nitro.resource.config.protocol.protocolhttpband_args import protocolhttpband_args
from nssrc.com.citrix.netscaler.nitro.resource.config.snmp.snmpgroup import snmpgroup
from nssrc.com.citrix.netscaler.nitro.resource.config.snmp.snmpmanager import snmpmanager
from nssrc.com.citrix.netscaler.nitro.resource.config.snmp.snmpoid import snmpoid
from nssrc.com.citrix.netscaler.nitro.resource.config.snmp.snmpoid_args import snmpoid_args
from nssrc.com.citrix.netscaler.nitro.resource.config.snmp.snmptrap import snmptrap
from nssrc.com.citrix.netscaler.nitro.resource.config.snmp.snmpuser import snmpuser
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslcertkey import sslcertkey
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslcipher_binding import sslcipher_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslfipskey import sslfipskey
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslpolicy_binding import sslpolicy_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslpolicy_csvserver_binding import sslpolicy_csvserver_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.system.systemgroup_binding import systemgroup_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.transform.transformprofile_transformaction_binding import transformprofile_transformaction_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.vpn.vpnglobal_authenticationldappolicy_binding import vpnglobal_authenticationldappolicy_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.vpn.vpnglobal_vpnclientlessaccesspolicy_binding import vpnglobal_vpnclientlessaccesspolicy_binding
class get_config:
def __init__(self):
        self.ip = ""
        self.username = ""
        self.password = ""
@staticmethod
def main(cls, args_):
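        # Sample entry point: expects <ip> <username> <password> on the command
        # line, opens an HTTP NITRO session and runs every get_* helper below.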
        if(len(args_) < 4):
print("Usage: run.bat <ip> <username> <password>")
return
config = get_config()
config.ip = args_[1]
config.username = args_[2]
config.password = args_[3]
client = None
try:
client = nitro_service(config.ip,"http")
client.set_credential(config.username,config.password)
client.timeout = 500
config.run_sample(client)
client.logout()
except nitro_exception as e:
print("Exception::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e:
print("Exception::message="+str(e.args))
return
#Getting GSLB vserver, service and site
def get_gslbvserver(self, client) :
try :
result = gslbvserver.get(client, "newgvip1")
if result :
print("get_gslbvserver - name= "+result.name + ", servicetype= " + result.servicetype)
else :
print("get_gslbvserver - Done")
except nitro_exception as e :
print("Exception::get_gslbvserver::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_gslbvserver::message="+str(e.args))
def get_gslbservice(self, client) :
try :
result = gslbservice.get(client, "newsvc0")
if result :
print("get_gslbservice - servicename= "+result.servicename + ", servicetype= " + result.servicetype)
else :
print("get_gslbservice - Done")
except nitro_exception as e :
print("Exception::get_gslbservice::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_gslbservice::message="+str(e.args))
def get_gslbsite(self, client) :
try :
result = gslbsite.get(client, "bangalore1")
if result :
print("get_gslbsite - sitename= "+result.sitename + ", siteipaddress= " + result.siteipaddress)
else :
print("get_gslbsite - Done")
except nitro_exception as e :
print("Exception::get_gslbsite::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_gslbsite::message="+str(e.args))
def get_gslbvserver_service_binding(self, client) :
try :
result = gslbvserver_gslbservice_binding.get(client, "newgvip1")
if result :
for i in range(len(result)) :
print("get_gslbvserver_service_binding - vserver name= "+result[i].name + ", servicename= " + result[i].servicename)
else :
print("get_gslbvserver_service_binding - Done")
except nitro_exception as e :
print("Exception::get_gslbvserver_service_binding::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_gslbvserver_service_binding::message="+str(e.args))
def get_vpnglobal_authpol(self, client) :
try :
result = vpnglobal_authenticationldappolicy_binding.get(client)
if result :
for i in range(len(result)) :
print("get_vpnglobal_authpol - version= "+result[i].policyname + ", secondary= " + str(result[i].secondary))
else :
print("Exception::get_vpnglobal_authpol - Done")
except nitro_exception as e :
print("Exception::get_vpnglobal_authpol::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_vpnglobal_authpol::message="+str(e.args))
def getlbvs_svc_bind_bulk(self, client) :
try :
str_ = ["v1", "v2"]
result = lbvserver_binding.get(client, str_)
if result :
for i in range(len(result)) :
if result[i].get_lbvserver_service_bindings() :
print("getlbvs_svc_bind_bulk - version= "+result[i].name + ", services= " + result[i].lbvserver_service_bindings.length)
else :
print("getlbvs_svc_bind_bulk - Done")
except nitro_exception as e :
print("Exception::getlbvs_svc_bind_bulk::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::getlbvs_svc_bind_bulk::message="+str(e.args))
def getlbvserver_bulk(self, client) :
try :
str_ = ["v1", "v2"]
result = lbvserver.get(client, str_)
if result :
for i in range(len(result)) :
print("getlbvserver_bulk - version= "+result[i].name + ", lbmethod= " + result[i].lbmethod)
else :
print("getlbvserver_bulk - Done")
except nitro_exception as e :
print("Exception::getlbvserver_bulk::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::getlbvserver_bulk::message="+str(e.args))
def get_nsversion(self, client) :
try :
result = nsversion.get(client)
if result :
print("get_nsversion - version= "+result[0].version + ", mode= " + result[0].mode)
else :
print("get_nsversion - Done")
except nitro_exception as e :
print("Exception::get_nsversion::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nsversion::message="+str(e.args))
def count_snmpoid(self, client) :
try :
obj = snmpoid()
obj.entitytype = "VSERVER"
count = snmpoid.count(client, obj)
print("count_snmpoid - count:"+str(count))
except nitro_exception as e :
print("Exception::count_snmpoid::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::count_snmpoid::message="+str(e.args))
def get_nsacl(self, client) :
try :
result = nsacl.get(client, "xyz")
if result :
print("get_nsacl - aclname= "+result.aclname + ", kernelstate= " + result.kernelstate)
else :
print("get_nsacl - Done")
except nitro_exception as e :
print("Exception::get_nsacl::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nsacl::message="+str(e.args))
def get_nsxmlnamespace(self, client) :
try :
result = nsxmlnamespace.get(client)
if result :
for i in range(len(result)) :
print("get_nsxmlnamespace - prefix= "+result[i].prefix + ", namespace= " + result[i].Namespace)
else :
print("Exception::get_nsxmlnamespace - Done")
except nitro_exception as e :
print("Exception::get_nsxmlnamespace::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nsxmlnamespace::message="+str(e.args))
def get_nstcpbufparam(self, client) :
try :
result = nstcpbufparam.get(client)
if result :
for i in range(len(result)) :
print("get_nstcpbufparam - size= "+str(result[i].size) + ", memlimit= " + str(result[i].memlimit))
else :
print("Exception::get_nstcpbufparam - Done")
except nitro_exception as e :
print("Exception::get_nstcpbufparam::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nstcpbufparam::message="+str(e.args))
def get_nslimitidentifier(self, client) :
try :
result = nslimitidentifier.get(client)
if result :
for i in range(len(result)) :
print("get_nslimitidentifier - timeslice: "+str(result[i].timeslice))
else :
print("get_nslimitidentifier - Done")
except nitro_exception as e :
print("Exception::get_nslimitidentifier::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nslimitidentifier::message="+str(e.args))
def get_sslfipskey(self, client) :
try :
result = sslfipskey.get(client)
if result :
for i in range(len(result)) :
print("get_sslfipskey - certkey: "+result[i].fipskeyname)
else :
print("get_sslfipskey - Done")
except nitro_exception as e :
print("Exception::get_sslfipskey::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_sslfipskey::message="+str(e.args))
def get_sslcertkey(self, client) :
try :
result = sslcertkey.get(client)
if result :
for i in range(len(result)) :
print("get_sslcertkey - certkey: "+result[i].certkey)
else :
print("get_sslcertkey - Done")
except nitro_exception as e :
print("Exception::get_sslcertkey::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_sslcertkey::message="+str(e.args))
def get_nsip(self, client) :
try :
obj = nsip()
obj.ipaddress = "1.1.1.77"
if obj :
result = nsip.get(client, obj)
print("get_nsip - metric"+result.metric+ ", flags=" +result.flags+ ", ospfarea"+ result.ospfarea+ ", ospfareaval=" +result.ospfareaval)
else :
print("get_nsip - Done")
except nitro_exception as e :
print("Exception::get_nsip::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nsip::message="+str(e.args))
def get_dnszone(self, client) :
try :
result = dnszone.get(client)
if result :
for i in range(len(result)) :
print(" zone:"+result[i].zonename)
else :
print("get_dnszone - Done")
except nitro_exception as e :
print("Exception::get_dnszone::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_dnszone::message="+str(e.args))
def get_dnsnsecrec(self, client) :
try :
result = dnsnsecrec.get(client)
if result :
for i in range(len(result)) :
print(" host:"+result[i].hostname)
else :
print("get_dnsnsecrec - Done")
except nitro_exception as e :
print("Exception::get_dnsnsecrec::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_dnsnsecrec::message="+str(e.args))
def get_dnsview_dnspol_binding(self, client) :
try :
obj = dnsview_dnspolicy_binding()
obj.viewname = "xx"
result = dnsview_dnspolicy_binding.get_filtered(client, "xx", "dnspolicyname:pol1")
if result :
for i in range(len(result)) :
print(" pol:"+result[i].dnspolicyname)
else :
print("get_dnsview_dnspol_binding - Done")
except nitro_exception as e :
print("Exception::get_dnsview_dnspol_binding::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_dnsview_dnspol_binding::message="+str(e.args))
def get_snmptrap(self, client) :
try :
str_ = "10.102.1.2"
obj = snmptrap()
obj.trapclass = "generic"
obj.trapdestination = str_
obj = snmptrap.get(client, obj)
if obj :
print("get_snmptrap port:"+str(obj.get_destport())+" description: " +obj.get_trapdestination()+" srcIP: "+obj.get_srcip())
else :
print("get_snmptrap - Done")
except nitro_exception as e :
print("Exception::get_snmptrap::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_snmptrap::message="+str(e.args))
def get_snmpuser(self, client) :
try :
obj = snmpuser()
obj.name = "u1"
obj = snmpuser.get(client, "u1")
if obj :
print("get_snmpuser Name:"+obj.name+" grpName: " +obj.group+" storage Type: "+obj.storagetype)
else :
print("get_snmpuser - Done")
except nitro_exception as e :
print("Exception::get_snmpuser::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_snmpuser::message="+str(e.args))
def get_snmpmanager(self, client) :
try :
obj = snmpmanager()
obj.ipaddress = "10.102.31.20"
if obj :
obj1 = snmpmanager.get(client, obj)
if obj1 :
print("get_snmpmanager id:"+obj1.ipaddress+"description" +obj1.netmask)
else :
print("get_snmpmanager - Done")
except nitro_exception as e :
print("Exception::get_snmpmanager::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_snmpmanager::message="+str(e.args))
def get_channel(self, client) :
try :
obj = channel.get(client,"LA/1")
if obj :
print("get_channel id:"+obj.id+", description" +obj.description)
else :
print("get_channel - Done")
except nitro_exception as e :
print("Exception::get_channel::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_channel::message="+str(e.args))
def get_interface(self, client) :
try :
obj = Interface.get(client, "1/3")
if obj :
print("get_interface id:"+obj.id+", reqduplex" +obj.reqduplex)
else :
print("get_interface - Done")
except nitro_exception as e :
print("Exception::get_interface::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_interface::message="+str(e.args))
def count_filtered_lbvserver_svc_bindings(self, client):
try:
filter_params = []
filter_params = [ filtervalue() for _ in range(2)]
filter_params[0] = filtervalue("servicetype","HTTP")
filter_params[1] = filtervalue("port","80")
response = lbvserver_service_binding.count_filtered(client, "lb1", filter_params)
print("count_filtered_lbvserver_svc_bindings:: " + str(response))
except nitro_exception as e:
print("Exception::count_filtered_lbvserver_svc_bindings::errocode - "+str(e.errorcode)+", message - "+e.message)
except Exception as e:
print("Exception::count_filtered_lbvserver_svc_bindings:: "+str(e.args))
def count_lbvserver(self, client):
try:
lbvs_count = lbvserver.count(client)
print("count_lbvserver::"+str(lbvs_count))
except nitro_exception as e:
print("Exception::count_lbvserver::errorcode="+str(e.errorcode)+", Message= "+e.errorcode)
except Exception as e:
print("Exception::count_lbvserver::message="+str(e.args))
def count_lbvserver_service_binding(self, client):
try:
response = lbvserver_service_binding.count(client, "lb1")
print("count_lbvserver_service_binding:: " + str(response))
except nitro_exception as e:
print("Exception::count_lbvserver_service_binding::errocode - "+str(e.errorcode)+", message - "+e.message)
except Exception as e:
print("Exception::count_lbvserver_service_binding:: "+str(e.args))
def count_lbvserver_cachepolicy_binding(self, client) :
try :
lbvs_count = lbvserver_cachepolicy_binding.count(client,"lbvip1")
print("count_lbvserver_cachepolicy_binding : "+lbvs_count)
except nitro_exception as e :
print("Exception::count_lbvserver_cachepolicy_binding::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::count_lbvserver_cachepolicy_binding::message="+str(e.args))
def get_snmpoid(self, client) :
try :
obj = snmpoid()
obj.entitytype = "VSERVER"
result = snmpoid.get(client, obj)
if result :
print("get_snmpoid - enitity_name: "+result.entitytype+" name="+result.name+" cmnt= "+result.Snmpoid)
else :
print("Exception::get_snmpoid::Done")
except nitro_exception as e :
print("Exception::get_snmpoid::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_snmpoid::message="+str(e.args))
def get_svcmon_binds(self, client) :
try :
result = service_lbmonitor_binding.get(client, "s1")
print("get_svcmon_binds name="+str(len(result)))
if result :
for _ in range(len(result)) :
print("mon name: "+result[0].monitor_name)
else :
print("get_svcmon_binds - Done")
except nitro_exception as e :
print("Exception::get_svcmon_binds::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_svcmon_binds::message="+str(e.args))
def get_svcgrp_svr_bind(self, client) :
try :
result = servicegroup_servicegroupmember_binding.get(client, "svcgrp1")
print("get_svcgrp_svr_bind name="+str(len(result)))
if result :
for i in range(len(result)) :
print("svrip: "+result[i].ip)
else :
print("get_svcgrp_svr_bind - Done")
except nitro_exception as e :
print("Exception::get_svcgrp_svr_bind::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_svcgrp_svr_bind::message="+str(e.args))
def get_nsfeature(self, client) :
try :
features = client.get_features()
i=1
print("nsfeature on given NS: ")
for feature in features :
print("\t"+ str(i) +") "+feature)
i = i + 1
except nitro_exception as e :
print("Exception::get_nsfeature::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nsfeature::message="+str(e.args))
def get_enabled_nsfeature(self, client) :
try :
enabled_features = client.get_enabled_features()
i=1
print("enabled nsfeatures: ")
for en_feature in enabled_features :
print("\t"+ str(i) +") "+en_feature)
i= i + 1
except nitro_exception as e :
print("Exception::get_enabled_nsfeature::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_enabled_nsfeature::message="+str(e.args))
def get_enabled_modes(self, client) :
try :
enabled_modes = client.get_enabled_modes()
i =1
print("enabled nsmodes: ")
for en_mode in enabled_modes :
print("\t"+ str(i) +") "+en_mode)
i = i + 1
except nitro_exception as e :
print("Exception::get_enabled_modes::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_enabled_modes::message="+str(e.args))
def get_nsmode(self, client) :
try :
modes = client.get_modes()
i =1
print("nsmodes on given NS: ")
for mode in modes :
print("\t"+ str(i) +") "+mode)
i= i + 1
except nitro_exception as e :
print("Exception::get_nsmode::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_nsmode::message="+str(e.args))
def get_sslcipher_binds(self, client) :
try :
result = sslcipher_binding.get(client, "g1")
if result :
print("get_sslcipher_binds name="+result.sslcipher_individualcipher_bindings.length)
else :
print("get_sslcipher_binds - Done")
except nitro_exception as e :
print("Exception::get_sslcipher_binds::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_sslcipher_binds::message="+str(e.args))
def get_vpnglobal_vpnclientlessaccesspolicy_bindings(self, client) :
try :
result = vpnglobal_vpnclientlessaccesspolicy_binding.get(client)
if result :
print("get_vpnglobal_binds name="+str(len(result)))
else :
print("get_vpnglobal_binds - Done")
except nitro_exception as e :
print("Exception::get_vpnglobal_binds::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_vpnglobal_binds::message="+str(e.args))
def get_auth_bindings(self, client) :
try :
result = authorizationpolicylabel_binding.get(client, "trans_http_url")
if result :
print("get_auth_bindings name="+result.labelname)
if result.get_authorizationpolicylabel_authorizationpolicy_bindings() :
for i in range(len(result.authorizationpolicylabel_authorizationpolicy_bindings)) :
print(" auth cmd policies polname="+result.authorizationpolicylabel_authorizationpolicy_bindings()[i].policyname+" priority="+result.get_authorizationpolicylabel_authorizationpolicy_bindings()[i].priority+"invoke"+result.get_authorizationpolicylabel_authorizationpolicy_bindings()[i].invoke)
else :
print("get_auth_bindings - Done")
except nitro_exception as e :
print("Exception::get_auth_bindings::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_auth_bindings::message="+str(e.args))
def get_systemgrp_binds(self, client) :
try :
result = systemgroup_binding.get(client, "g1")
if result :
print("get_systemgrp_binds name="+result.groupname)
if(result.get_systemgroup_systemcmdpolicy_bindings()) :
for i in range(len(result.get_systemgroup_systemcmdpolicy_bindings())) :
print("system cmd policies polname="+result.get_systemgroup_systemcmdpolicy_bindings()[i].policyname+" priority="+result.get_systemgroup_systemcmdpolicy_bindings()[i].priority)
if(result.get_systemgroup_systemuser_bindings()) :
for i in range(len(result.get_systemgroup_systemuser_bindings())) :
print("system cmd user username="+result.get_systemgroup_systemuser_bindings()[i].username)
except nitro_exception as e :
print("Exception::get_systemgrp_binds::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_systemgrp_binds::message="+str(e.args))
def get_gslbservice_binds(self, client) :
try :
result = gslbservice_binding.get(client, "sj_svc")
if result :
print("get_gslbservice_binds name="+result.servicename+", viewname=" + result.get_gslbservice_dnsview_bindings()[0].viewname)
else :
print("get_gslbservice_binds - Done")
except nitro_exception as e :
print("Exception::get_gslbservice_binds::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_gslbservice_binds::message="+str(e.args))
def get_gslbldnsentries(self, client) :
try :
result = gslbldnsentries.get(client)
if result :
print("get_gslbldnsentries result::length="+str(len(result)))
else :
print("get_gslbldnsentries - Done")
except nitro_exception as e :
print("Exception::get_gslbldnsentries::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_gslbldnsentries::message="+str(e.args))
def get_cmppolicy_bindings(self, client) :
try :
result = cmppolicy_lbvserver_binding.get(client, "pdf_cmp")
if result :
for i in range(len(result)) :
print("cmppol name="+result[i].name+", lbvserver=" + result[i].boundto)
else :
print("get_cmppolicy_bindings - Done")
except nitro_exception as e :
print("Exception::get_cmppolicy_bindings::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_cmppolicy_bindings::message="+str(e.args))
def get_auditnslogaction(self, client) :
try :
result = auditnslogaction.get(client)
if result :
for i in range(len(result)) :
print("cmppol name="+result[i].name+", ip=" + result[i].serverip)
else :
print("Exception::get_auditnslogaction - Done")
except nitro_exception as e :
print("Exception::get_auditnslogaction::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_auditnslogaction::message="+str(e.args))
def get_sslbindings(self, client) :
try :
obj = sslpolicy_binding()
obj.name = "certInsert_pol"
result = sslpolicy_binding.get(client, "certInsert_pol")
if result :
print("get_sslbindings result::name="+result.name)
if result.get_sslpolicy_csvserver_bindings() :
                    xx = result.get_sslpolicy_csvserver_bindings()
for j in range(len(xx)) :
print("csvservername" + xx[j].boundto)
else :
print("get_sslbindings - Done")
except nitro_exception as e :
print("Exception::get_sslbindings::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_sslbindings::message="+str(e.args))
def get_hanode(self, client) :
try :
option_ = options()
option_.detailview = True
result = hanode.get(client, "", option_)
if result :
for i in range(len(result)) :
print("get_hanode result::Id="+result[i].id+", IP="+result[i].ipaddress+", effecive-interfaces="+result[i].enaifaces)
else :
print("get_hanode - Done")
except nitro_exception as e :
print("Exception::get_hanode::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_hanode::message="+str(e.args))
def get_dnssuffix(self, client) :
try :
result = dnssuffix.get(client, "citrix.com")
if result :
print("get_dnssuffix result::name="+result.Dnssuffix)
else :
print("get_dnssuffix result - Done")
except nitro_exception as e :
print("Exception::get_dnssuffix::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_dnssuffix::message="+str(e.args))
def get_gslbparameter(self, client) :
try :
result = gslbparameter.get(client)
if result :
for i in range(len(result)) :
print("get_gslbparameter result::mask="+result[i].ldnsmask+", ldnsProbOrder="+str(result[i].ldnsprobeorder))
else :
print("get_gslbparameter result - Done")
except nitro_exception as e :
print("Exception::get_gslbparameter::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_gslbparameter::message="+str(e.args))
def get_auditsyslogparams(self, client) :
try :
result = auditsyslogparams.get(client)
if result :
for i in range(len(result)) :
print("get_auditsyslogparams result::ip="+result[i].serverip+", loglevel="+str((result[i].loglevel)[0]))
else :
print("get_auditsyslogparams result - Done")
except nitro_exception as e :
print("Exception::get_auditsyslogparams::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_auditsyslogparams::message="+str(e.args))
def get_appfwconfield(self, client) :
try :
obj = appfwconfidfield()
obj.fieldname = "ap_con"
obj.url = "/test1"
result = appfwconfidfield.get(client, obj)
if result :
print("get_appfwconfield result::name="+result.fieldname+", url="+result.url)
else :
print("get_appfwconfield result - Done")
except nitro_exception as e :
print("Exception::get_appfwconfield::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_appfwconfield::message="+str(e.args))
def get_appfwprofile(self, client) :
try :
result = appfwprofile.get(client, "pr1")
if result :
print("get_appfwprofile result::name="+result.name+", StartURLAction="+result.starturlaction[0]+result.starturlaction[1]+ result.starturlaction[2])
else :
print("get_appfwprofile - Done ")
except nitro_exception as e :
print("Exception::get_appfwprofile::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_appfwprofile::message="+str(e.args))
def get_policyexpression(self, client) :
try :
args = policyexpression_args()
args.type = "classic"
result = policyexpression.get_args(client, args)
if result :
print("get_policyexpression result::length="+str(len(result)))
else :
print("get_policyexpression - Done ")
except nitro_exception as e :
print("Exception::get_policyexpression::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_policyexpression::message="+str(e.args))
def get_snmpgroup( self, client) :
try :
obj = snmpgroup()
obj.name = "snmp_grp"
obj.securitylevel = "noAuthNoPriv"
result = snmpgroup.get(client, obj)
if result :
print("get_snmpgroup result::name="+result.name)
else :
print("get_snmpgroup - done")
except nitro_exception as e :
print("Exception::get_snmpgroup::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_snmpgroup::message="+str(e.args))
def get_cacheobjects(self, client) :
try :
result = cacheobject.get(client)
if result :
print("get_cacheobjects result::length="+str(len(result)))
else:
print("getlbvserver_svc_bindings :: Done")
except nitro_exception as e :
print("Exception::get_cacheobjects::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_cacheobjects::message="+str(e.args))
def get_lbvserver(self, client):
try:
result = lbvserver.get(client)
if result :
print("get_lbvserver result::length="+str(len(result)))
else:
print("Exception::get_lbvserver - Done")
except nitro_exception as e:
print("Exception::get_lbvserver::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e:
print("Exception::get_lbvserver::message="+str(e.args))
def get_svc_bind(self, client) :
try :
obj = service_binding()
obj.name = "svc1"
result = service_binding.get(client,"svc1")
if result :
print("get_svc_bind result::length="+result.get_service_lbmonitor_bindings().length)
for i in range(len(result.get_service_lbmonitor_bindings())) :
print("resptime: "+result.get_service_lbmonitor_bindings()[i].get_responsetime())
else :
print("get_svc_bind - Done")
except nitro_exception as e :
print("Exception::get_svc_bind::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_svc_bind::message="+str(e.args))
def get_protocolhttpband(self, client) :
try :
obj = protocolhttpband()
obj.type = "REQUEST"
result = protocolhttpband.get(client, obj)
if result :
print("get_protocolhttpband result::length="+str(len(result.accesscount)))
else :
print("get_protocolhttpband - Done")
except nitro_exception as e :
print("Exception::get_protocolhttpband::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_protocolhttpband::message="+str(e.args))
def getlbvserver_bindings(self, client) :
try :
obj = lbvserver_binding()
obj.name = "lb_vip"
result = lbvserver_binding.get(client,"lb_vip")
if result :
print("getlbvserver_bindings result::name="+result.name)
else:
print("getlbvserver_svc_bindings :: Done")
except nitro_exception as e :
print("Exception::getlbvserver_bindings::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::getlbvserver_bindings::message="+str(e.args))
def getlbvserver_svc_bindings(self, client):
try:
result = lbvserver_service_binding.get(client, "lb1")
if result :
print("getlbvserver_svc_bindings result::length="+str(len(result)))
else:
print("getlbvserver_svc_bindings :: Done")
except nitro_exception as e:
print("Exception::getlbvserver_svc_bindings::errocode - "+str(e.errorcode)+", message - "+e.message)
except Exception as e:
print("Exception::getlbvserver_svc_bindings:: "+str(e.args))
def getfiltered_lbvserver_svc_bindings(self, client):
try:
filter_params = []
filter_params = [ filtervalue() for _ in range(2)]
filter_params[0] = filtervalue("servicetype","HTTP")
filter_params[1] = filtervalue("port","80")
result = lbvserver_service_binding.get_filtered(client, "lb1", filter_params)
if result :
print("getlbvserver_svc_bindings result::length="+str(len(result)))
else:
print("getlbvserver_svc_bindings :: Done")
except nitro_exception as e:
print("Exception::getfiltered_lbvserver_svc_bindings::errocode - "+str(e.errorcode)+", message - "+e.message)
except Exception as e:
print("Exception::getfiltered_lbvserver_svc_bindings:: "+str(e.args))
def getlbvserver_byname(self, client):
try:
obj = lbvserver.get(client, "lb1")
print("getlbvserver_byname result::name="+obj.name+", curstate="+obj.curstate+", effectivestate=" + obj.effectivestate)
except nitro_exception as e:
print("Exception::getlbvserver_byname::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e:
print("Exception::getlbvserver_byname::message="+str(e.args))
def getfiltered_services(self, client) :
try :
filter_params = []
filter_params = [ filtervalue() for _ in range(2)]
filter_params[0] = filtervalue("port", "80")
filter_params[1] = filtervalue("servicetype", "HTTP")
result = service.get_filtered(client, filter_params)
if result :
print("getfiltered_services result::length="+str(len(result)))
else :
print("Exception::getfiltered_services::Done")
except nitro_exception as e :
print("Exception::getfiltered_services::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::getfiltered_services::message="+str(e.args))
def get_appfwlearningdata(self, client) :
try :
args = appfwlearningdata()
args.profilename = "pr_testsite_3"
args.securitycheck = "starturl"
result = appfwlearningdata.get(client, args)
if result :
print("get_appfwlearningdata result::profile name="+appfwlearningdata.profilename+"blob= "+appfwlearningdata.data)
else :
print("Exception::get_appfwlearningdata::Done")
except nitro_exception as e :
print("Exception::get_appfwlearningdata::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e :
print("Exception::get_appfwlearningdata::message="+str(e.args))
def count_filtered_lbvserver(self, client):
try:
filter_params = []
filter_params = [ filtervalue() for _ in range(2)]
filter_params[0] = filtervalue("servicetype","HTTP")
filter_params[1] = filtervalue("port","80")
lbvs_count = lbvserver.count_filtered(client, filter_params)
print("count_filtered_lbvserver:: " + str(lbvs_count))
except nitro_exception as e:
print("Exception::count_filtered_lbvserver::errocode - "+str(e.errorcode)+", message - "+e.message)
except Exception as e:
print("Exception::count_filtered_lbvserver:: "+str(e.args))
def getfiltered_lbvserver(self, client):
try:
filter_params = []
filter_params = [ filtervalue() for _ in range(2)]
filter_params[0] = filtervalue("servicetype","HTTP")
filter_params[1] = filtervalue("port","80")
result = lbvserver.get_filtered(client, filter_params)
if result :
print("getfiltered_lbvserver result::length="+str(len(result)))
else:
print("Exception::getfiltered_lbvserver - Done")
except nitro_exception as e:
print("Exception::getfiltered_lbvserver::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e:
print("Exception::getfiltered_lbvserver::message="+str(e.args))
def run_sample(self, client):
self.get_svcgrp_svr_bind(client);
self.get_protocolhttpband(client);
self.get_lbvserver(client);
self.getlbvserver_byname(client);
self.getlbvserver_bindings(client);
self.getlbvserver_svc_bindings(client);
self.get_policyexpression(client);
self.get_cacheobjects(client);
self.get_snmpgroup(client);
self.get_appfwprofile(client);
self.get_appfwconfield(client);
self.get_auditsyslogparams(client);
self.get_gslbparameter(client);
self.get_dnssuffix(client);
self.get_hanode(client);
self.get_sslbindings(client);
self.get_cmppolicy_bindings(client);
self.get_auditnslogaction(client);
self.get_gslbldnsentries(client);
self.get_gslbservice_binds(client);
self.get_systemgrp_binds(client);
self.get_auth_bindings(client);
self.get_vpnglobal_vpnclientlessaccesspolicy_bindings(client);
self.get_sslcipher_binds(client);
self.get_nsmode(client);
self.get_enabled_modes(client);
self.get_nsfeature(client);
self.get_enabled_nsfeature(client);
self.get_svcmon_binds(client);
self.get_snmpoid(client);
self.get_appfwlearningdata(client);
self.count_lbvserver(client);
self.count_lbvserver_service_binding(client);
self.count_filtered_lbvserver(client);
self.getfiltered_lbvserver(client);
self.getfiltered_services(client);
self.getfiltered_lbvserver_svc_bindings(client);
self.get_channel(client);
self.get_snmpmanager(client);
self.get_snmptrap(client);
self.count_lbvserver_cachepolicy_binding(client);
self.get_interface(client);
self.get_dnsview_dnspol_binding(client);
self.get_svc_bind(client);
self.get_sslcertkey(client);
self.get_sslfipskey(client);
self.get_nsip(client);
self.get_nslimitidentifier(client);
self.get_nstcpbufparam(client);
self.get_nsxmlnamespace(client);
self.get_snmpuser(client);
self.get_nsacl(client);
self.count_snmpoid(client);
self.get_nsversion(client);
self.getlbvserver_bulk(client);
self.getlbvs_svc_bind_bulk(client);
self.get_vpnglobal_authpol(client);
self.get_gslbvserver(client);
self.get_gslbsite(client);
self.get_gslbservice(client);
self.get_gslbvserver_service_binding(client);
#
# Main thread of execution
#
if __name__ == '__main__':
try:
if len(sys.argv) != 4:
sys.exit()
else:
ipaddress=sys.argv[1]
username=sys.argv[2]
password=sys.argv[3]
get_config().main(get_config(),sys.argv)
except SystemExit:
print("Exception::Usage: Sample.py <directory path of Nitro.py> <nsip> <username> <password>")
|
apache-2.0
|
varigit/VAR-SOM-AM33-SDK7-Kernel
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
joebowen/movement_validation_cloud
|
djangodev/lib/python2.7/site-packages/boto/ec2/autoscale/instance.py
|
151
|
2428
|
# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Instance(object):
def __init__(self, connection=None):
self.connection = connection
self.instance_id = None
self.health_status = None
self.launch_config_name = None
self.lifecycle_state = None
self.availability_zone = None
self.group_name = None
def __repr__(self):
r = 'Instance<id:%s, state:%s, health:%s' % (self.instance_id,
self.lifecycle_state,
self.health_status)
if self.group_name:
r += ' group:%s' % self.group_name
r += '>'
return r
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
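        # SAX-style hook: boto's XML response parser calls this once per closing
        # tag, so each AutoScaling element name is mapped onto an attribute here.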
if name == 'InstanceId':
self.instance_id = value
elif name == 'HealthStatus':
self.health_status = value
elif name == 'LaunchConfigurationName':
self.launch_config_name = value
elif name == 'LifecycleState':
self.lifecycle_state = value
elif name == 'AvailabilityZone':
self.availability_zone = value
elif name == 'AutoScalingGroupName':
self.group_name = value
else:
setattr(self, name, value)
|
mit
|
bbgw/Klampt
|
Python/demos/vistemplate.py
|
2
|
2256
|
#!/usr/bin/python
import sys
from klampt import *
from klampt import visualization
from klampt import coordinates
from klampt import so3
from klampt import ik
import time
import math
if __name__ == "__main__":
print "vistemplate.py: This example demonstrates how to run the visualization framework"
if len(sys.argv)<=1:
print "USAGE: vistemplate.py [world_file]"
exit()
#creates a world and loads all the items on the command line
world = WorldModel()
for fn in sys.argv[1:]:
res = world.readFile(fn)
if not res:
raise RuntimeError("Unable to load model "+fn)
coordinates.setWorldModel(world)
#add the world to the visualizer
visualization.add("world",world)
#add the coordinate Manager to the visualizer
visualization.add("coordinates",coordinates.manager())
#test a point
pt = [2,5,1]
visualization.add("some point",pt)
#test a rigid transform
visualization.add("some blinking transform",[so3.identity(),[1,3,0.5]])
#test an IKObjective
link = world.robot(0).link(world.robot(0).numLinks()-1)
#point constraint
#obj = ik.objective(link,local=[[0,0,0]],world=[pt])
#hinge constraint
obj = ik.objective(link,local=[[0,0,0],[0,0,0.1]],world=[pt,[pt[0],pt[1],pt[2]+0.1]])
#transform constraint
#obj = ik.objective(link,R=link.getTransform()[0],t=pt)
visualization.add("ik objective",obj)
print "Starting visualization..."
#run the visualizer in a separate thread
visualization.show()
iteration = 0
while visualization.shown():
visualization.lock()
#TODO: you may modify the world here. This line tests a sin wave.
pt[2] = 1 + math.sin(iteration*0.03)
visualization.unlock()
#changes to the visualization must be done outside the lock
if (iteration / 100)%2 == 0:
visualization.hide("some blinking transform")
else:
visualization.hide("some blinking transform",False)
#this is another way of changing the point's data
#visualization.add("some point",[2,5,1 + math.sin(iteration*0.03)],keepAppearance=True)
time.sleep(0.01)
iteration += 1
print "Ending visualization."
visualization.kill()
|
bsd-3-clause
|
ampax/edx-platform
|
cms/lib/xblock/test/test_runtime.py
|
148
|
2238
|
"""
Tests of edX Studio runtime functionality
"""
from urlparse import urlparse
from mock import Mock
from unittest import TestCase
from cms.lib.xblock.runtime import handler_url
class TestHandlerUrl(TestCase):
"""Test the LMS handler_url"""
def setUp(self):
super(TestHandlerUrl, self).setUp()
self.block = Mock()
def test_trailing_characters(self):
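        # handler_url should never end with a dangling '?' or '/', no matter
        # which combination of suffix/query arguments is supplied.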
self.assertFalse(handler_url(self.block, 'handler').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler').endswith('/'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix').endswith('/'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix', 'query').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler', 'suffix', 'query').endswith('/'))
self.assertFalse(handler_url(self.block, 'handler', query='query').endswith('?'))
self.assertFalse(handler_url(self.block, 'handler', query='query').endswith('/'))
def _parsed_query(self, query_string):
"""Return the parsed query string from a handler_url generated with the supplied query_string"""
return urlparse(handler_url(self.block, 'handler', query=query_string)).query
def test_query_string(self):
self.assertIn('foo=bar', self._parsed_query('foo=bar'))
self.assertIn('foo=bar&baz=true', self._parsed_query('foo=bar&baz=true'))
self.assertIn('foo&bar&baz', self._parsed_query('foo&bar&baz'))
def _parsed_path(self, handler_name='handler', suffix=''):
"""Return the parsed path from a handler_url with the supplied handler_name and suffix"""
return urlparse(handler_url(self.block, handler_name, suffix=suffix)).path
def test_suffix(self):
self.assertTrue(self._parsed_path(suffix="foo").endswith('foo'))
self.assertTrue(self._parsed_path(suffix="foo/bar").endswith('foo/bar'))
self.assertTrue(self._parsed_path(suffix="/foo/bar").endswith('/foo/bar'))
def test_handler_name(self):
self.assertIn('handler1', self._parsed_path('handler1'))
self.assertIn('handler_a', self._parsed_path('handler_a'))
|
agpl-3.0
|
pkuyym/Paddle
|
python/paddle/fluid/tests/unittests/test_assign_op.py
|
5
|
1035
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import op_test
import numpy
import unittest
class TestAssignOp(op_test.OpTest):
def setUp(self):
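        # The assign op copies its input unchanged, so the expected output is
        # just the random input tensor itself.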
self.op_type = "assign"
x = numpy.random.random(size=(100, 10))
self.inputs = {'X': x}
self.outputs = {'Out': x}
def test_forward(self):
self.check_output()
def test_backward(self):
self.check_grad(['X'], 'Out')
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
40323120/2016springcd_aG2
|
static/plugin/liquid_tags/b64img.py
|
312
|
3085
|
"""
Image Tag
---------
This implements a Liquid-style image tag for Pelican,
based on the liquid img tag which is based on the octopress image tag [1]_
Syntax
------
{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}
Examples
--------
{% b64img /images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png 150 150 "Ninja Attack!" "Ninja in attack posture" %}
Output
------
<img src="data:;base64,....">
<img class="left half" src="data:;base64,..." title="Ninja Attack!" alt="Ninja Attack!">
<img class="left half" src="data:;base64,..." width="150" height="150" title="Ninja Attack!" alt="Ninja in attack posture">
[1] https://github.com/imathis/octopress/blob/master/plugins/image_tag.rb
"""
import re
import base64
import urllib2
from .mdx_liquid_tags import LiquidTags
import six
SYNTAX = '{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}'
# Regular expression to match the entire syntax
ReImg = re.compile("""(?P<class>\S.*\s+)?(?P<src>(?:https?:\/\/|\/|\S+\/)\S+)(?:\s+(?P<width>\d+))?(?:\s+(?P<height>\d+))?(?P<title>\s+.+)?""")
# Regular expression to split the title and alt text
ReTitleAlt = re.compile("""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
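# Illustrative note (not part of the original plugin): applied to the markup shown in the
# docstring examples above, ReImg splits out class/src/width/height/title, and
# ReTitleAlt then separates a quoted title/alt pair, e.g.
#   ReTitleAlt.search('"Ninja Attack!" "Ninja in attack posture"').groupdict()
#   -> {'title': 'Ninja Attack!', 'alt': 'Ninja in attack posture'}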
def _get_file(src):
""" Return content from local or remote file. """
try:
        if '://' in src or src[0:2] == '//': # Most likely this is a remote file
response = urllib2.urlopen(src)
return response.read()
else:
with open(src, 'rb') as fh:
return fh.read()
except Exception as e:
raise RuntimeError('Error generating base64image: {}'.format(e))
def base64image(src):
""" Generate base64 encoded image from srouce file. """
return base64.b64encode(_get_file(src))
@LiquidTags.register('b64img')
def b64img(preprocessor, tag, markup):
attrs = None
# Parse the markup string
match = ReImg.search(markup)
if match:
attrs = dict([(key, val.strip())
for (key, val) in six.iteritems(match.groupdict()) if val])
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# Check if alt text is present -- if so, split it from title
if 'title' in attrs:
match = ReTitleAlt.search(attrs['title'])
if match:
attrs.update(match.groupdict())
if not attrs.get('alt'):
attrs['alt'] = attrs['title']
attrs['src'] = 'data:;base64,{}'.format(base64image(attrs['src']))
# Return the formatted text
return "<img {0}>".format(' '.join('{0}="{1}"'.format(key, val)
for (key, val) in six.iteritems(attrs)))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
|
agpl-3.0
|
pdellaert/ansible
|
lib/ansible/module_utils/network/nxos/facts/legacy/base.py
|
8
|
24988
|
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import platform
import re
from ansible.module_utils.network.nxos.nxos import run_commands, get_config, get_capabilities
from ansible.module_utils.network.nxos.utils.utils import get_interface_type, normalize_interface
from ansible.module_utils.six import iteritems
g_config = None
class FactsBase(object):
def __init__(self, module):
self.module = module
self.warnings = list()
self.facts = dict()
self.capabilities = get_capabilities(self.module)
def populate(self):
pass
def run(self, command, output='text'):
command_string = command
command = {
'command': command,
'output': output
}
resp = run_commands(self.module, [command], check_rc='retry_json')
try:
return resp[0]
except IndexError:
self.warnings.append('command %s failed, facts for this command will not be populated' % command_string)
return None
def get_config(self):
global g_config
if not g_config:
g_config = get_config(self.module)
return g_config
def transform_dict(self, data, keymap):
transform = dict()
for key, fact in keymap:
if key in data:
transform[fact] = data[key]
return transform
def transform_iterable(self, iterable, keymap):
for item in iterable:
yield self.transform_dict(item, keymap)
class Default(FactsBase):
def populate(self):
data = None
data = self.run('show version')
if data:
self.facts['serialnum'] = self.parse_serialnum(data)
data = self.run('show license host-id')
if data:
self.facts['license_hostid'] = self.parse_license_hostid(data)
self.facts.update(self.platform_facts())
def parse_serialnum(self, data):
match = re.search(r'Processor Board ID\s*(\S+)', data, re.M)
if match:
return match.group(1)
def platform_facts(self):
platform_facts = {}
resp = self.capabilities
device_info = resp['device_info']
platform_facts['system'] = device_info['network_os']
for item in ('model', 'image', 'version', 'platform', 'hostname'):
val = device_info.get('network_os_%s' % item)
if val:
platform_facts[item] = val
platform_facts['api'] = resp['network_api']
platform_facts['python_version'] = platform.python_version()
return platform_facts
def parse_license_hostid(self, data):
match = re.search(r'License hostid: VDH=(.+)$', data, re.M)
if match:
return match.group(1)
class Config(FactsBase):
def populate(self):
super(Config, self).populate()
self.facts['config'] = self.get_config()
class Features(FactsBase):
def populate(self):
super(Features, self).populate()
data = self.get_config()
if data:
features = []
for line in data.splitlines():
if line.startswith('feature'):
features.append(line.replace('feature', '').strip())
self.facts['features_enabled'] = features
class Hardware(FactsBase):
def populate(self):
data = self.run('dir')
if data:
self.facts['filesystems'] = self.parse_filesystems(data)
data = None
data = self.run('show system resources', output='json')
if data:
if isinstance(data, dict):
self.facts['memtotal_mb'] = int(data['memory_usage_total']) / 1024
self.facts['memfree_mb'] = int(data['memory_usage_free']) / 1024
else:
self.facts['memtotal_mb'] = self.parse_memtotal_mb(data)
self.facts['memfree_mb'] = self.parse_memfree_mb(data)
def parse_filesystems(self, data):
return re.findall(r'^Usage for (\S+)//', data, re.M)
def parse_memtotal_mb(self, data):
match = re.search(r'(\S+)K(\s+|)total', data, re.M)
if match:
memtotal = match.group(1)
return int(memtotal) / 1024
def parse_memfree_mb(self, data):
match = re.search(r'(\S+)K(\s+|)free', data, re.M)
if match:
memfree = match.group(1)
return int(memfree) / 1024
class Interfaces(FactsBase):
INTERFACE_MAP = frozenset([
('state', 'state'),
('desc', 'description'),
('eth_bw', 'bandwidth'),
('eth_duplex', 'duplex'),
('eth_speed', 'speed'),
('eth_mode', 'mode'),
('eth_hw_addr', 'macaddress'),
('eth_mtu', 'mtu'),
('eth_hw_desc', 'type')
])
INTERFACE_SVI_MAP = frozenset([
('svi_line_proto', 'state'),
('svi_bw', 'bandwidth'),
('svi_mac', 'macaddress'),
('svi_mtu', 'mtu'),
('type', 'type')
])
INTERFACE_IPV4_MAP = frozenset([
('eth_ip_addr', 'address'),
('eth_ip_mask', 'masklen')
])
INTERFACE_SVI_IPV4_MAP = frozenset([
('svi_ip_addr', 'address'),
('svi_ip_mask', 'masklen')
])
INTERFACE_IPV6_MAP = frozenset([
('addr', 'address'),
('prefix', 'subnet')
])
def ipv6_structure_op_supported(self):
data = self.capabilities
if data:
nxos_os_version = data['device_info']['network_os_version']
unsupported_versions = ['I2', 'F1', 'A8']
for ver in unsupported_versions:
if ver in nxos_os_version:
return False
return True
def populate(self):
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
self.facts['neighbors'] = {}
data = None
data = self.run('show interface', output='json')
if data:
if isinstance(data, dict):
self.facts['interfaces'] = self.populate_structured_interfaces(data)
else:
interfaces = self.parse_interfaces(data)
self.facts['interfaces'] = self.populate_interfaces(interfaces)
if self.ipv6_structure_op_supported():
data = self.run('show ipv6 interface', output='json')
else:
data = None
if data:
if isinstance(data, dict):
self.populate_structured_ipv6_interfaces(data)
else:
interfaces = self.parse_interfaces(data)
self.populate_ipv6_interfaces(interfaces)
data = self.run('show lldp neighbors', output='json')
if data:
if isinstance(data, dict):
self.facts['neighbors'].update(self.populate_structured_neighbors_lldp(data))
else:
self.facts['neighbors'].update(self.populate_neighbors(data))
data = self.run('show cdp neighbors detail', output='json')
if data:
if isinstance(data, dict):
self.facts['neighbors'].update(self.populate_structured_neighbors_cdp(data))
else:
self.facts['neighbors'].update(self.populate_neighbors_cdp(data))
self.facts['neighbors'].pop(None, None) # Remove null key
def populate_structured_interfaces(self, data):
interfaces = dict()
for item in data['TABLE_interface']['ROW_interface']:
name = item['interface']
intf = dict()
if 'type' in item:
intf.update(self.transform_dict(item, self.INTERFACE_SVI_MAP))
else:
intf.update(self.transform_dict(item, self.INTERFACE_MAP))
if 'eth_ip_addr' in item:
intf['ipv4'] = self.transform_dict(item, self.INTERFACE_IPV4_MAP)
self.facts['all_ipv4_addresses'].append(item['eth_ip_addr'])
if 'svi_ip_addr' in item:
intf['ipv4'] = self.transform_dict(item, self.INTERFACE_SVI_IPV4_MAP)
self.facts['all_ipv4_addresses'].append(item['svi_ip_addr'])
interfaces[name] = intf
return interfaces
def populate_structured_ipv6_interfaces(self, data):
try:
data = data['TABLE_intf']
if data:
if isinstance(data, dict):
data = [data]
for item in data:
name = item['ROW_intf']['intf-name']
intf = self.facts['interfaces'][name]
intf['ipv6'] = self.transform_dict(item, self.INTERFACE_IPV6_MAP)
try:
addr = item['ROW_intf']['addr']
except KeyError:
addr = item['ROW_intf']['TABLE_addr']['ROW_addr']['addr']
self.facts['all_ipv6_addresses'].append(addr)
else:
return ""
except TypeError:
return ""
def populate_structured_neighbors_lldp(self, data):
objects = dict()
data = data['TABLE_nbor']['ROW_nbor']
if isinstance(data, dict):
data = [data]
for item in data:
local_intf = normalize_interface(item['l_port_id'])
objects[local_intf] = list()
nbor = dict()
nbor['port'] = item['port_id']
nbor['host'] = nbor['sysname'] = item['chassis_id']
objects[local_intf].append(nbor)
return objects
def populate_structured_neighbors_cdp(self, data):
objects = dict()
data = data['TABLE_cdp_neighbor_detail_info']['ROW_cdp_neighbor_detail_info']
if isinstance(data, dict):
data = [data]
for item in data:
local_intf = item['intf_id']
objects[local_intf] = list()
nbor = dict()
nbor['port'] = item['port_id']
nbor['host'] = nbor['sysname'] = item['device_id']
objects[local_intf].append(nbor)
return objects
def parse_interfaces(self, data):
parsed = dict()
key = ''
for line in data.split('\n'):
if len(line) == 0:
continue
elif line.startswith('admin') or line[0] == ' ':
parsed[key] += '\n%s' % line
else:
match = re.match(r'^(\S+)', line)
if match:
key = match.group(1)
                    if not (key.startswith('admin') or key.startswith('IPv6 Interface')):
parsed[key] = line
return parsed
def populate_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
if get_interface_type(key) == 'svi':
intf['state'] = self.parse_state(key, value, intf_type='svi')
intf['macaddress'] = self.parse_macaddress(value, intf_type='svi')
intf['mtu'] = self.parse_mtu(value, intf_type='svi')
intf['bandwidth'] = self.parse_bandwidth(value, intf_type='svi')
intf['type'] = self.parse_type(value, intf_type='svi')
if 'Internet Address' in value:
intf['ipv4'] = self.parse_ipv4_address(value, intf_type='svi')
facts[key] = intf
else:
intf['state'] = self.parse_state(key, value)
intf['description'] = self.parse_description(value)
intf['macaddress'] = self.parse_macaddress(value)
intf['mode'] = self.parse_mode(value)
intf['mtu'] = self.parse_mtu(value)
intf['bandwidth'] = self.parse_bandwidth(value)
intf['duplex'] = self.parse_duplex(value)
intf['speed'] = self.parse_speed(value)
intf['type'] = self.parse_type(value)
if 'Internet Address' in value:
intf['ipv4'] = self.parse_ipv4_address(value)
facts[key] = intf
return facts
def parse_state(self, key, value, intf_type='ethernet'):
match = None
if intf_type == 'svi':
match = re.search(r'line protocol is\s*(\S+)', value, re.M)
else:
match = re.search(r'%s is\s*(\S+)' % key, value, re.M)
if match:
return match.group(1)
def parse_macaddress(self, value, intf_type='ethernet'):
match = None
if intf_type == 'svi':
match = re.search(r'address is\s*(\S+)', value, re.M)
else:
match = re.search(r'address:\s*(\S+)', value, re.M)
if match:
return match.group(1)
def parse_mtu(self, value, intf_type='ethernet'):
match = re.search(r'MTU\s*(\S+)', value, re.M)
if match:
return match.group(1)
def parse_bandwidth(self, value, intf_type='ethernet'):
match = re.search(r'BW\s*(\S+)', value, re.M)
if match:
return match.group(1)
def parse_type(self, value, intf_type='ethernet'):
match = None
if intf_type == 'svi':
match = re.search(r'Hardware is\s*(\S+)', value, re.M)
else:
match = re.search(r'Hardware:\s*(.+),', value, re.M)
if match:
return match.group(1)
def parse_description(self, value, intf_type='ethernet'):
match = re.search(r'Description: (.+)$', value, re.M)
if match:
return match.group(1)
def parse_mode(self, value, intf_type='ethernet'):
match = re.search(r'Port mode is (\S+)', value, re.M)
if match:
return match.group(1)
def parse_duplex(self, value, intf_type='ethernet'):
match = re.search(r'(\S+)-duplex', value, re.M)
if match:
return match.group(1)
def parse_speed(self, value, intf_type='ethernet'):
match = re.search(r'duplex, (.+)$', value, re.M)
if match:
return match.group(1)
def parse_ipv4_address(self, value, intf_type='ethernet'):
ipv4 = {}
match = re.search(r'Internet Address is (.+)$', value, re.M)
if match:
address = match.group(1)
addr = address.split('/')[0]
ipv4['address'] = address.split('/')[0]
ipv4['masklen'] = address.split('/')[1]
self.facts['all_ipv4_addresses'].append(addr)
return ipv4
def populate_neighbors(self, data):
objects = dict()
# if there are no neighbors the show command returns
# ERROR: No neighbour information
if data.startswith('ERROR'):
return dict()
regex = re.compile(r'(\S+)\s+(\S+)\s+\d+\s+\w+\s+(\S+)')
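        # Illustrative (hypothetical) neighbor line this regex is meant to match:
        #   "switch2        Eth1/11        120        BR         Eth1/12"
        # -> host/sysname='switch2', local interface='Eth1/11', remote port='Eth1/12'.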
for item in data.split('\n')[4:-1]:
match = regex.match(item)
if match:
nbor = dict()
nbor['host'] = nbor['sysname'] = match.group(1)
nbor['port'] = match.group(3)
local_intf = normalize_interface(match.group(2))
if local_intf not in objects:
objects[local_intf] = []
objects[local_intf].append(nbor)
return objects
def populate_neighbors_cdp(self, data):
facts = dict()
for item in data.split('----------------------------------------'):
if item == '':
continue
local_intf = self.parse_lldp_intf(item)
if local_intf not in facts:
facts[local_intf] = list()
fact = dict()
fact['port'] = self.parse_lldp_port(item)
fact['sysname'] = self.parse_lldp_sysname(item)
facts[local_intf].append(fact)
return facts
def parse_lldp_intf(self, data):
match = re.search(r'Interface:\s*(\S+)', data, re.M)
if match:
return match.group(1).strip(',')
def parse_lldp_port(self, data):
match = re.search(r'Port ID \(outgoing port\):\s*(\S+)', data, re.M)
if match:
return match.group(1)
def parse_lldp_sysname(self, data):
match = re.search(r'Device ID:(.+)$', data, re.M)
if match:
return match.group(1)
def populate_ipv6_interfaces(self, interfaces):
facts = dict()
for key, value in iteritems(interfaces):
intf = dict()
intf['ipv6'] = self.parse_ipv6_address(value)
facts[key] = intf
def parse_ipv6_address(self, value):
ipv6 = {}
match_addr = re.search(r'IPv6 address:\s*(\S+)', value, re.M)
if match_addr:
addr = match_addr.group(1)
ipv6['address'] = addr
self.facts['all_ipv6_addresses'].append(addr)
match_subnet = re.search(r'IPv6 subnet:\s*(\S+)', value, re.M)
if match_subnet:
ipv6['subnet'] = match_subnet.group(1)
return ipv6
class Legacy(FactsBase):
# facts from nxos_facts 2.1
VERSION_MAP = frozenset([
('host_name', '_hostname'),
('kickstart_ver_str', '_os'),
('chassis_id', '_platform')
])
MODULE_MAP = frozenset([
('model', 'model'),
('modtype', 'type'),
('ports', 'ports'),
('status', 'status')
])
FAN_MAP = frozenset([
('fanname', 'name'),
('fanmodel', 'model'),
('fanhwver', 'hw_ver'),
('fandir', 'direction'),
('fanstatus', 'status')
])
POWERSUP_MAP = frozenset([
('psmodel', 'model'),
('psnum', 'number'),
('ps_status', 'status'),
('ps_status_3k', 'status'),
('actual_out', 'actual_output'),
('actual_in', 'actual_in'),
('total_capa', 'total_capacity'),
('input_type', 'input_type'),
('watts', 'watts'),
('amps', 'amps')
])
def populate(self):
data = None
data = self.run('show version', output='json')
if data:
if isinstance(data, dict):
self.facts.update(self.transform_dict(data, self.VERSION_MAP))
else:
self.facts['_hostname'] = self.parse_hostname(data)
self.facts['_os'] = self.parse_os(data)
self.facts['_platform'] = self.parse_platform(data)
data = self.run('show interface', output='json')
if data:
if isinstance(data, dict):
self.facts['_interfaces_list'] = self.parse_structured_interfaces(data)
else:
self.facts['_interfaces_list'] = self.parse_interfaces(data)
data = self.run('show vlan brief', output='json')
if data:
if isinstance(data, dict):
self.facts['_vlan_list'] = self.parse_structured_vlans(data)
else:
self.facts['_vlan_list'] = self.parse_vlans(data)
data = self.run('show module', output='json')
if data:
if isinstance(data, dict):
self.facts['_module'] = self.parse_structured_module(data)
else:
self.facts['_module'] = self.parse_module(data)
data = self.run('show environment fan', output='json')
if data:
if isinstance(data, dict):
self.facts['_fan_info'] = self.parse_structured_fan_info(data)
else:
self.facts['_fan_info'] = self.parse_fan_info(data)
data = self.run('show environment power', output='json')
if data:
if isinstance(data, dict):
self.facts['_power_supply_info'] = self.parse_structured_power_supply_info(data)
else:
self.facts['_power_supply_info'] = self.parse_power_supply_info(data)
def parse_structured_interfaces(self, data):
objects = list()
for item in data['TABLE_interface']['ROW_interface']:
objects.append(item['interface'])
return objects
def parse_structured_vlans(self, data):
objects = list()
data = data['TABLE_vlanbriefxbrief']['ROW_vlanbriefxbrief']
if isinstance(data, dict):
objects.append(data['vlanshowbr-vlanid-utf'])
elif isinstance(data, list):
for item in data:
objects.append(item['vlanshowbr-vlanid-utf'])
return objects
def parse_structured_module(self, data):
data = data['TABLE_modinfo']['ROW_modinfo']
if isinstance(data, dict):
data = [data]
objects = list(self.transform_iterable(data, self.MODULE_MAP))
return objects
def parse_structured_fan_info(self, data):
objects = list()
if data.get('fandetails'):
data = data['fandetails']['TABLE_faninfo']['ROW_faninfo']
elif data.get('fandetails_3k'):
data = data['fandetails_3k']['TABLE_faninfo']['ROW_faninfo']
else:
return objects
objects = list(self.transform_iterable(data, self.FAN_MAP))
return objects
def parse_structured_power_supply_info(self, data):
if data.get('powersup').get('TABLE_psinfo_n3k'):
fact = data['powersup']['TABLE_psinfo_n3k']['ROW_psinfo_n3k']
else:
if isinstance(data['powersup']['TABLE_psinfo'], list):
fact = []
for i in data['powersup']['TABLE_psinfo']:
fact.append(i['ROW_psinfo'])
else:
fact = data['powersup']['TABLE_psinfo']['ROW_psinfo']
objects = list(self.transform_iterable(fact, self.POWERSUP_MAP))
return objects
def parse_hostname(self, data):
match = re.search(r'\s+Device name:\s+(\S+)', data, re.M)
if match:
return match.group(1)
def parse_os(self, data):
match = re.search(r'\s+system:\s+version\s*(\S+)', data, re.M)
if match:
return match.group(1)
else:
match = re.search(r'\s+kickstart:\s+version\s*(\S+)', data, re.M)
if match:
return match.group(1)
def parse_platform(self, data):
match = re.search(r'Hardware\n\s+cisco\s+(\S+\s+\S+)', data, re.M)
if match:
return match.group(1)
def parse_interfaces(self, data):
objects = list()
for line in data.split('\n'):
if len(line) == 0:
continue
elif line.startswith('admin') or line[0] == ' ':
continue
else:
match = re.match(r'^(\S+)', line)
if match:
intf = match.group(1)
if get_interface_type(intf) != 'unknown':
objects.append(intf)
return objects
def parse_vlans(self, data):
objects = list()
for line in data.splitlines():
if line == '':
continue
if line[0].isdigit():
vlan = line.split()[0]
objects.append(vlan)
return objects
def parse_module(self, data):
objects = list()
for line in data.splitlines():
if line == '':
break
if line[0].isdigit():
obj = {}
match_port = re.search(r'\d\s*(\d*)', line, re.M)
if match_port:
obj['ports'] = match_port.group(1)
match = re.search(r'\d\s*\d*\s*(.+)$', line, re.M)
if match:
l = match.group(1).split(' ')
items = list()
for item in l:
if item == '':
continue
items.append(item.strip())
if items:
obj['type'] = items[0]
obj['model'] = items[1]
obj['status'] = items[2]
objects.append(obj)
return objects
def parse_fan_info(self, data):
objects = list()
for l in data.splitlines():
if '-----------------' in l or 'Status' in l:
continue
line = l.split()
if len(line) > 1:
obj = {}
obj['name'] = line[0]
obj['model'] = line[1]
obj['hw_ver'] = line[-2]
obj['status'] = line[-1]
objects.append(obj)
return objects
def parse_power_supply_info(self, data):
objects = list()
for l in data.splitlines():
if l == '':
break
if l[0].isdigit():
obj = {}
line = l.split()
obj['model'] = line[1]
obj['number'] = line[0]
obj['status'] = line[-1]
objects.append(obj)
return objects
|
gpl-3.0
|
jnfrye/local_plants_book
|
scripts/observations/analyze/species_data_analyzer.py
|
1
|
4641
|
import pandas as pd
import argparse
from difflib import get_close_matches
import PyFloraBook.in_out.data_coordinator as dc
# ---------------- GLOBALS ----------------
# These are the weights used to create the final score (a weighted avg)
WEIGHTS = {
"CalFlora": 1,
"OregonFlora": 1,
"CPNWH_OR": 1,
"CPNWH_WA": 1,
}
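# Illustrative note (not part of the original script): with the equal weights above,
# each website's normalized weight is 1/4 = 0.25, so the final score computed below
# is 0.25 * (CalFlora + OregonFlora + CPNWH_OR + CPNWH_WA) for each species.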
WEBSITES = WEIGHTS.keys()
INPUT_SUFFIX = "species"
OUTPUT_SUFFIX = "scores"
# ---------------- INPUT ----------------
# Parse arguments
parser = argparse.ArgumentParser(
description='Gather species counts for given families and analyze'
)
parser.add_argument(
"-f", "--families", nargs='+',
help="Names of the families to be analyzed."
)
args = parser.parse_args()
families = args.families
# Normalize the weights
weights_df = pd.DataFrame.from_dict(WEIGHTS, orient="index")
weights_df.columns = ["weight"]
weights_df['normed'] = \
weights_df['weight'] / weights_df['weight'].sum(axis=0)
# TODO Refactor this into an `is_normalized(...)` function
assert 0.999 < weights_df['normed'].sum(axis=0) < 1.001
weights_df.drop('weight', axis=1, inplace=True)
# Locate relevant folders
cleansed_data_folder = dc.locate_cleansed_data_folder()
scores_folder = dc.locate_scores_folder()
for family in families:
# Read data from files
data_frames = dict()
for website in WEBSITES:
website_folder = cleansed_data_folder / website
website_data_file_name = family + "_" + INPUT_SUFFIX + ".csv"
data_frames[website] = pd.read_csv(
str(website_folder / website_data_file_name), index_col=0
)
# Normalize data and combine into a single dataframe
normed_data = pd.DataFrame()
for key, df in data_frames.items():
df['normed'] = df['count'] / df['count'].sum(axis=0)
assert 0.999 < df['normed'].sum(axis=0) < 1.001
df.drop('count', axis=1, inplace=True)
normed_data = pd.concat([normed_data] + [df], axis=1)
normed_data.columns = list(normed_data.columns[:-1]) + [key]
# Replace NaN entries with zeros
normed_data = normed_data.fillna(0)
# Try to find alternate spellings
close_matches = []
indices = list(normed_data.index.values)
for index in indices:
matched_pair = get_close_matches(index, indices, cutoff=0.83, n=2)
if len(matched_pair) > 1 and matched_pair[::-1] not in close_matches:
close_matches.append(matched_pair)
    # Each matched pair may be two spellings (or synonyms) of the same species.
    # We treat the pair as likely duplicates if the two rows are "disjoint", i.e.
    # no column (website) has a nonzero count in both rows.
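    # Hypothetical example (not from the real data): for the pair
    # ('Arbutus menziesii', 'Arbutus menzesii') the normalized rows might look like
    #   CalFlora=0.02, OregonFlora=0.00   <- 'Arbutus menziesii'
    #   CalFlora=0.00, OregonFlora=0.01   <- 'Arbutus menzesii'
    # No website reports both spellings, so the rows are disjoint and the pair is
    # flagged as a likely misspelling.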
for match in close_matches:
matched_rows = normed_data.loc[match]
are_disjoint = not any(
[all(matched_rows[col] != 0) for col in matched_rows]
)
        # Mark disjoint pairs so they stand out in the interactive prompt below.
if are_disjoint:
match.append(" *** ")
else:
match.append(" ")
# Merge alternate spellings (if desired)
while len(close_matches) > 0:
print(
"These pairs of species names look similar. "
"Should any be merged? If prefixed by ***, the rows are 'disjoint'"
)
for index, match in enumerate(close_matches):
print(match[2] + match[0] + "\n" + \
match[2] + match[1] + " " + str(index) + "\n")
choices = input(
"Type index to merge, a space, then 'u'/'d' to merge up/down."
"\nType -1 to quit.\n"
).split()
index_choice = int(choices[0])
        if index_choice == -1:
break
merge_direction = choices[1]
if merge_direction == 'd':
sign = -1
elif merge_direction == 'u':
sign = +1
else:
            raise ValueError("Merge direction must be 'u' or 'd'.")
target, replacement = tuple(close_matches[index_choice][:2][::sign])
del close_matches[index_choice]
normed_data.loc[target] += normed_data.loc[replacement]
normed_data.drop([replacement], inplace=True)
# Create final score column
normed_data['score'] = normed_data.dot(weights_df)
assert 0.999 < normed_data['score'].sum(axis=0) < 1.001
normed_data.index = pd.MultiIndex.from_tuples(
list(map(tuple, normed_data.index.str.split())),
names=("genus", "species")
)
print(normed_data["score"].sum(level="genus"))
print(normed_data['score'])
scores_file_name = family + '_' + OUTPUT_SUFFIX + ".csv"
normed_data.to_csv(
str(scores_folder / scores_file_name), columns=["score"]
)
|
mit
|
nttks/edx-platform
|
lms/djangoapps/bulk_email/tests/test_forms.py
|
45
|
12615
|
# -*- coding: utf-8 -*-
"""
Unit tests for bulk-email-related forms.
"""
from django.conf import settings
from mock import patch
from nose.plugins.attrib import attr
from bulk_email.models import CourseAuthorization, CourseEmailTemplate
from bulk_email.forms import CourseAuthorizationAdminForm, CourseEmailTemplateForm
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_TOY_MODULESTORE
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
@attr('shard_1')
class CourseAuthorizationFormTest(ModuleStoreTestCase):
"""Test the CourseAuthorizationAdminForm form for Mongo-backed courses."""
def setUp(self):
super(CourseAuthorizationFormTest, self).setUp()
course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
self.course = CourseFactory.create(display_name=course_title)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_authorize_mongo_course(self):
# Initially course shouldn't be authorized
self.assertFalse(CourseAuthorization.instructor_email_enabled(self.course.id))
# Test authorizing the course, which should totally work
form_data = {'course_id': self.course.id.to_deprecated_string(), 'email_enabled': True}
form = CourseAuthorizationAdminForm(data=form_data)
# Validation should work
self.assertTrue(form.is_valid())
form.save()
# Check that this course is authorized
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_repeat_course(self):
# Initially course shouldn't be authorized
self.assertFalse(CourseAuthorization.instructor_email_enabled(self.course.id))
# Test authorizing the course, which should totally work
form_data = {'course_id': self.course.id.to_deprecated_string(), 'email_enabled': True}
form = CourseAuthorizationAdminForm(data=form_data)
# Validation should work
self.assertTrue(form.is_valid())
form.save()
# Check that this course is authorized
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
# Now make a new course authorization with the same course id that tries to turn email off
form_data = {'course_id': self.course.id.to_deprecated_string(), 'email_enabled': False}
form = CourseAuthorizationAdminForm(data=form_data)
# Validation should not work because course_id field is unique
self.assertFalse(form.is_valid())
self.assertEquals(
"Course authorization with this Course id already exists.",
form._errors['course_id'][0] # pylint: disable=protected-access
)
with self.assertRaisesRegexp(
ValueError,
"The CourseAuthorization could not be created because the data didn't validate."
):
form.save()
# Course should still be authorized (invalid attempt had no effect)
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_form_typo(self):
# Munge course id
bad_id = SlashSeparatedCourseKey(u'Broken{}'.format(self.course.id.org), 'hello', self.course.id.run + '_typo')
form_data = {'course_id': bad_id.to_deprecated_string(), 'email_enabled': True}
form = CourseAuthorizationAdminForm(data=form_data)
# Validation shouldn't work
self.assertFalse(form.is_valid())
msg = u'COURSE NOT FOUND'
msg += u' --- Entered course id was: "{0}". '.format(bad_id.to_deprecated_string())
msg += 'Please recheck that you have supplied a valid course id.'
self.assertEquals(msg, form._errors['course_id'][0]) # pylint: disable=protected-access
with self.assertRaisesRegexp(
ValueError,
"The CourseAuthorization could not be created because the data didn't validate."
):
form.save()
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_form_invalid_key(self):
form_data = {'course_id': "asd::**!@#$%^&*())//foobar!!", 'email_enabled': True}
form = CourseAuthorizationAdminForm(data=form_data)
# Validation shouldn't work
self.assertFalse(form.is_valid())
msg = u'Course id invalid.'
msg += u' --- Entered course id was: "asd::**!@#$%^&*())//foobar!!". '
msg += 'Please recheck that you have supplied a valid course id.'
self.assertEquals(msg, form._errors['course_id'][0]) # pylint: disable=protected-access
with self.assertRaisesRegexp(
ValueError,
"The CourseAuthorization could not be created because the data didn't validate."
):
form.save()
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_course_name_only(self):
# Munge course id - common
form_data = {'course_id': self.course.id.run, 'email_enabled': True}
form = CourseAuthorizationAdminForm(data=form_data)
# Validation shouldn't work
self.assertFalse(form.is_valid())
error_msg = form._errors['course_id'][0] # pylint: disable=protected-access
self.assertIn(u'--- Entered course id was: "{0}". '.format(self.course.id.run), error_msg)
self.assertIn(u'Please recheck that you have supplied a valid course id.', error_msg)
with self.assertRaisesRegexp(
ValueError,
"The CourseAuthorization could not be created because the data didn't validate."
):
form.save()
class CourseAuthorizationXMLFormTest(ModuleStoreTestCase):
"""Check that XML courses cannot be authorized for email."""
MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_xml_course_authorization(self):
course_id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
# Assert this is an XML course
self.assertEqual(modulestore().get_modulestore_type(course_id), ModuleStoreEnum.Type.xml)
form_data = {'course_id': course_id.to_deprecated_string(), 'email_enabled': True}
form = CourseAuthorizationAdminForm(data=form_data)
# Validation shouldn't work
self.assertFalse(form.is_valid())
msg = u"Course Email feature is only available for courses authored in Studio. "
msg += u'"{0}" appears to be an XML backed course.'.format(course_id.to_deprecated_string())
self.assertEquals(msg, form._errors['course_id'][0]) # pylint: disable=protected-access
with self.assertRaisesRegexp(
ValueError,
"The CourseAuthorization could not be created because the data didn't validate."
):
form.save()
class CourseEmailTemplateFormTest(ModuleStoreTestCase):
"""Test the CourseEmailTemplateForm that is used in the Django admin subsystem."""
def test_missing_message_body_in_html(self):
"""
Asserts that we fail validation if we do not have the {{message_body}} tag
in the submitted HTML template
"""
form_data = {
'html_template': '',
'plain_template': '{{message_body}}',
'name': ''
}
form = CourseEmailTemplateForm(form_data)
self.assertFalse(form.is_valid())
def test_missing_message_body_in_plain(self):
"""
Asserts that we fail validation if we do not have the {{message_body}} tag
in the submitted plain template
"""
form_data = {
'html_template': '{{message_body}}',
'plain_template': '',
'name': ''
}
form = CourseEmailTemplateForm(form_data)
self.assertFalse(form.is_valid())
def test_blank_name_is_null(self):
"""
        Asserts that a CourseEmailTemplateForm submitted with a blank name is stored
        as a NULL in the database
"""
form_data = {
'html_template': '{{message_body}}',
'plain_template': '{{message_body}}',
'name': ''
}
form = CourseEmailTemplateForm(form_data)
self.assertTrue(form.is_valid())
form.save()
# now inspect the database and make sure the blank name was stored as a NULL
# Note this will throw an exception if it is not found
cet = CourseEmailTemplate.objects.get(name=None)
self.assertIsNotNone(cet)
def test_name_with_only_spaces_is_null(self):
"""
        Asserts that a CourseEmailTemplateForm submitted with a name containing only
        whitespace is stored as a NULL in the database
"""
form_data = {
'html_template': '{{message_body}}',
'plain_template': '{{message_body}}',
'name': ' '
}
form = CourseEmailTemplateForm(form_data)
self.assertTrue(form.is_valid())
form.save()
# now inspect the database and make sure the whitespace only name was stored as a NULL
# Note this will throw an exception if it is not found
cet = CourseEmailTemplate.objects.get(name=None)
self.assertIsNotNone(cet)
def test_name_with_spaces_is_trimmed(self):
"""
        Asserts that leading or trailing whitespace in the name of a submitted
        CourseEmailTemplateForm is stripped before saving
"""
form_data = {
'html_template': '{{message_body}}',
'plain_template': '{{message_body}}',
'name': ' foo '
}
form = CourseEmailTemplateForm(form_data)
self.assertTrue(form.is_valid())
form.save()
# now inspect the database and make sure the name is properly
# stripped
cet = CourseEmailTemplate.objects.get(name='foo')
self.assertIsNotNone(cet)
def test_non_blank_name(self):
"""
        Asserts that a CourseEmailTemplateForm submitted with a non-blank name
        can be found in the database under that name as a look-up key
"""
form_data = {
'html_template': '{{message_body}}',
'plain_template': '{{message_body}}',
'name': 'foo'
}
form = CourseEmailTemplateForm(form_data)
self.assertTrue(form.is_valid())
form.save()
# now inspect the database and make sure the blank name was stored as a NULL
# Note this will throw an exception if it is not found
cet = CourseEmailTemplate.objects.get(name='foo')
self.assertIsNotNone(cet)
def test_duplicate_name(self):
"""
Assert that we cannot submit a CourseEmailTemplateForm with a name
that already exists
"""
# first set up one template
form_data = {
'html_template': '{{message_body}}',
'plain_template': '{{message_body}}',
'name': 'foo'
}
form = CourseEmailTemplateForm(form_data)
self.assertTrue(form.is_valid())
form.save()
# try to submit form with the same name
form = CourseEmailTemplateForm(form_data)
self.assertFalse(form.is_valid())
# try again with a name with extra whitespace
# this should fail as we strip the whitespace away
form_data = {
'html_template': '{{message_body}}',
'plain_template': '{{message_body}}',
'name': ' foo '
}
form = CourseEmailTemplateForm(form_data)
self.assertFalse(form.is_valid())
# then try a different name
form_data = {
'html_template': '{{message_body}}',
'plain_template': '{{message_body}}',
'name': 'bar'
}
form = CourseEmailTemplateForm(form_data)
self.assertTrue(form.is_valid())
form.save()
form = CourseEmailTemplateForm(form_data)
self.assertFalse(form.is_valid())
|
agpl-3.0
|
riklaunim/django-custom-multisite
|
django/contrib/gis/db/backends/postgis/models.py
|
403
|
1970
|
"""
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"""
    The 'geometry_columns' table from PostGIS. See the PostGIS
documentation at Ch. 4.2.2.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
        Returns the name of the metadata column used to store the
        feature table name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
        Returns the name of the metadata column used to store the
        feature geometry column.
"""
return 'f_geometry_column'
def __unicode__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
    documentation at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
|
bsd-3-clause
|
vwvww/servo
|
components/script/dom/bindings/codegen/parser/tests/test_treatNonCallableAsNull.py
|
170
|
1701
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
[TreatNonCallableAsNull] callback Function = any(any... arguments);
interface TestTreatNonCallableAsNull1 {
attribute Function? onfoo;
attribute Function onbar;
};
""")
results = parser.finish()
iface = results[1]
attr = iface.members[0]
harness.check(attr.type.treatNonCallableAsNull(), True, "Got the expected value")
attr = iface.members[1]
harness.check(attr.type.treatNonCallableAsNull(), False, "Got the expected value")
parser = parser.reset()
threw = False
try:
parser.parse("""
callback Function = any(any... arguments);
interface TestTreatNonCallableAsNull2 {
[TreatNonCallableAsNull] attribute Function onfoo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
threw = False
try:
parser.parse("""
callback Function = any(any... arguments);
[TreatNonCallableAsNull]
interface TestTreatNonCallableAsNull3 {
attribute Function onfoo;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
threw = False
try:
parser.parse("""
[TreatNonCallableAsNull, TreatNonObjectAsNull]
callback Function = any(any... arguments);
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
|
mpl-2.0
|
palfrey/coherence
|
coherence/upnp/core/DIDLLite.py
|
1
|
34516
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2005, Tim Potter <[email protected]>
# Copyright 2006, Frank Scholz <[email protected]>
"""
TODO:
- use more XPath expressions in fromElement() methods
"""
import os
import string
import urllib
from datetime import datetime
DC_NS = 'http://purl.org/dc/elements/1.1/'
UPNP_NS = 'urn:schemas-upnp-org:metadata-1-0/upnp/'
my_namespaces = { DC_NS: 'dc',
UPNP_NS: 'upnp'
}
from coherence.extern.et import ET, namespace_map_update, ElementInterface
namespace_map_update(my_namespaces)
from coherence.upnp.core import utils
from coherence.upnp.core import dlna
from coherence import log
def qname(tag,ns=''):
if len(ns) == 0:
return tag
return "{%s}%s" % (ns,tag)
def is_audio(mimetype):
""" checks for type audio,
        expects a mimetype or a UPnP
protocolInfo
"""
test = mimetype.split(':')
if len(test) == 4:
mimetype = test[2]
if mimetype == 'application/ogg':
return True
if mimetype.startswith('audio/'):
return True
return False
class Resources(list):
""" a list of resources, always sorted after an append """
def __init__(self, *args, **kwargs):
list.__init__(self, *args, **kwargs)
self.sort(cmp=self.p_sort)
def append(self, value):
list.append(self,value)
self.sort(cmp=self.p_sort)
def p_sort(self,x,y):
""" we want the following order
http-get is always at the beginning
rtsp-rtp-udp the second
anything else after that
"""
if x.protocolInfo == None:
return 1
if y.protocolInfo == None:
return -1
x_protocol = x.protocolInfo.split(':')[0]
y_protocol = y.protocolInfo.split(':')[0]
x_protocol = x_protocol.lower()
y_protocol = y_protocol.lower()
if( x_protocol == y_protocol):
return 0
if(x_protocol == 'http-get'):
return -1
if(x_protocol == 'rtsp-rtp-udp' and y_protocol == 'http-get'):
return 1
if(x_protocol == 'rtsp-rtp-udp' and y_protocol != 'http-get'):
return -1
return 1
def get_matching(self, local_protocol_infos, protocol_type = None):
result = []
if not isinstance(local_protocol_infos, list):
local_protocol_infos = [local_protocol_infos]
for res in self:
if res.importUri != None:
continue
#print "res", res.protocolInfo, res.data
remote_protocol,remote_network,remote_content_format,_ = res.protocolInfo.split(':')
#print "remote", remote_protocol,remote_network,remote_content_format
if(protocol_type is not None and
remote_protocol.lower() != protocol_type.lower()):
continue
for protocol_info in local_protocol_infos:
local_protocol,local_network,local_content_format,_ = protocol_info.split(':')
#print "local", local_protocol,local_network,local_content_format
if((remote_protocol == local_protocol or
remote_protocol == '*' or
local_protocol == '*') and
(remote_network == local_network or
remote_network == '*' or
local_network == '*') and
(remote_content_format == local_content_format or
remote_content_format == '*' or
local_content_format == '*')):
#print result, res
result.append(res)
return result
def classChooser(mimetype, sub=None):
if mimetype == 'root':
return Container
if mimetype == 'item':
return Item
if mimetype == 'directory':
if sub == 'music':
return MusicAlbum
return Container
else:
if string.find (mimetype,'image/') == 0:
return Photo
if string.find (mimetype,'audio/') == 0:
if sub == 'music': # FIXME: this is stupid
return MusicTrack
return AudioItem
if string.find (mimetype,'video/') == 0:
return VideoItem
if mimetype == 'application/ogg':
if sub == 'music': # FIXME: this is stupid
return MusicTrack
return AudioItem
if mimetype == 'application/x-flac':
if sub == 'music': # FIXME: this is stupid
return MusicTrack
return AudioItem
return None
simple_dlna_tags = ['DLNA.ORG_OP=01', # operations parameter
'DLNA.ORG_PS=1', # play speed parameter
'DLNA.ORG_CI=0', # transcoded parameter
'DLNA.ORG_FLAGS=01100000000000000000000000000000']
def build_dlna_additional_info(content_format,does_playcontainer=False):
additional_info = ['*']
if content_format == 'audio/mpeg':
additional_info = ['DLNA.ORG_PN=MP3']+simple_dlna_tags
if content_format == 'audio/ms-wma':
additional_info = ['DLNA.ORG_PN=WMABASE']+simple_dlna_tags
if content_format == 'image/jpeg':
dlna_tags = simple_dlna_tags[:]
dlna_tags[3] = 'DLNA.ORG_FLAGS=00900000000000000000000000000000'
additional_info = ['DLNA.ORG_PN=JPEG_LRG']+dlna_tags
if content_format == 'image/png':
dlna_tags = simple_dlna_tags[:]
dlna_tags[3] = 'DLNA.ORG_FLAGS=00900000000000000000000000000000'
additional_info = ['DLNA.ORG_PN=PNG_LRG']+dlna_tags
if content_format == 'video/mpeg':
additional_info = ['DLNA.ORG_PN=MPEG_PS_PAL']+simple_dlna_tags
if content_format == 'video/mpegts':
additional_info = ['DLNA.ORG_PN=MPEG_TS_PAL']+simple_dlna_tags
content_format = 'video/mpeg'
if content_format in ['video/mp4','video/x-m4a']:
additional_info = ['DLNA.ORG_PN=AVC_TS_BL_CIF15_AAC']+simple_dlna_tags
if content_format == 'video/x-msvideo':
#additional_info = ';'.join(['DLNA.ORG_PN=MPEG4_P2_MP4_SP_AAC']+simple_dlna_tags)
additional_info = ['*']
if content_format == 'video/x-ms-wmv':
additional_info = ['DLNA.ORG_PN=WMV_BASE']+simple_dlna_tags
if content_format == '*':
additional_info = simple_dlna_tags
if does_playcontainer == True:
i = 0
for part in additional_info:
if part.startswith('DLNA.ORG_FLAGS'):
_,bits = part.split('=')
bits = int(bits,16)
bits |= 0x10000000000000000000000000000000
additional_info[i] = 'DLNA.ORG_FLAGS=%.32x' % bits
break
i += 1
return ';'.join(additional_info)
class Resource(object):
"""An object representing a resource."""
def __init__(self, data=None, protocolInfo=None):
self.data = data
self.protocolInfo = protocolInfo
self.bitrate = None
self.size = None
self.duration = None
self.resolution = None
self.importUri = None
if self.protocolInfo is not None:
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if additional_info == '*':
self.protocolInfo = ':'.join((protocol,network,content_format,build_dlna_additional_info(content_format)))
def get_additional_info(self,upnp_client=''):
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if upnp_client in ('XBox','Philips-TV',):
""" we don't need the DLNA tags there,
                and maybe they irritate these poor things anyway
"""
additional_info = '*'
elif upnp_client in ('PLAYSTATION3',):
if content_format.startswith('video/'):
additional_info = '*'
a_list = additional_info.split(';')
for part in a_list:
if part == 'DLNA.ORG_PS=1':
a_list.remove(part)
break
additional_info = ';'.join(a_list)
return additional_info
def toElement(self,**kwargs):
root = ET.Element('res')
if kwargs.get('upnp_client','') in ('XBox',):
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if content_format == 'video/x-msvideo':
content_format = 'video/avi'
if content_format == 'audio/x-wav':
content_format = 'audio/wav'
additional_info = self.get_additional_info(upnp_client=kwargs.get('upnp_client',''))
root.attrib['protocolInfo'] = ':'.join((protocol,network,content_format,additional_info))
else:
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
if content_format == 'video/x-msvideo':
content_format = 'video/avi'
additional_info = self.get_additional_info(upnp_client=kwargs.get('upnp_client',''))
root.attrib['protocolInfo'] = ':'.join((protocol,network,content_format,additional_info))
root.text = self.data
if self.bitrate is not None:
root.attrib['bitrate'] = str(self.bitrate)
if self.size is not None:
root.attrib['size'] = str(self.size)
if self.duration is not None:
root.attrib['duration'] = self.duration
if self.resolution is not None:
root.attrib['resolution'] = self.resolution
if self.importUri is not None:
root.attrib['importUri'] = self.importUri
return root
def fromElement(self, elt):
self.protocolInfo = elt.attrib['protocolInfo']
self.data = elt.text
self.bitrate = elt.attrib.get('bitrate')
self.size = elt.attrib.get('size')
self.duration = elt.attrib.get('duration',None)
self.resolution = elt.attrib.get('resolution',None)
self.importUri = elt.attrib.get('importUri',None)
def toString(self,**kwargs):
return ET.tostring(self.toElement(**kwargs),encoding='utf-8')
@classmethod
def fromString(cls, aString):
instance = cls()
elt = utils.parse_xml(aString)
#elt = ElementTree(elt)
instance.fromElement(elt.getroot())
return instance
def transcoded(self,format):
protocol,network,content_format,additional_info = self.protocolInfo.split(':')
dlna_tags = simple_dlna_tags[:]
#dlna_tags[1] = 'DLNA.ORG_OP=00'
dlna_tags[2] = 'DLNA.ORG_CI=1'
if format == 'mp3':
if content_format == 'audio/mpeg':
return None
content_format='audio/mpeg'
dlna_pn = 'DLNA.ORG_PN=MP3'
elif format == 'lpcm':
dlna_pn = 'DLNA.ORG_PN=LPCM'
content_format='audio/L16;rate=44100;channels=2'
else:
return None
additional_info = ';'.join([dlna_pn]+dlna_tags)
new_protocol_info = ':'.join((protocol,network,content_format,additional_info))
new_res = Resource(self.data+'/transcoded/%s' % format,
new_protocol_info)
new_res.size = None
new_res.duration = self.duration
new_res.resolution = self.resolution
return new_res
class PlayContainerResource(Resource):
"""An object representing a DLNA playcontainer resource."""
def __init__(self, udn, sid='urn:upnp-org:serviceId:ContentDirectory',
cid=None,
fid=None,
fii=0,
sc='',md=0,
protocolInfo=None):
if cid == None:
raise AttributeError('missing Container Id')
if fid == None:
raise AttributeError('missing first Child Id')
self.protocolInfo = protocolInfo
self.bitrate = None
self.size = None
self.duration = None
self.resolution = None
self.importUri = None
args = ['sid=' + urllib.quote(sid),
'cid=' + urllib.quote(str(cid)),
'fid=' + urllib.quote(str(fid)),
'fii=' + urllib.quote(str(fii)),
'sc=' + urllib.quote(''),
'md=' + urllib.quote(str(0))]
self.data = 'dlna-playcontainer://' + urllib.quote(str(udn)) \
+ '?' + '&'.join(args)
if self.protocolInfo == None:
self.protocolInfo = 'http-get:*:*:*'
class Object(log.Loggable):
"""The root class of the entire content directory class heirachy."""
logCategory = 'didllite'
upnp_class = 'object'
creator = None
res = None
writeStatus = None
date = None
albumArtURI = None
artist = None
album = None
originalTrackNumber=None
description = None
longDescription = None
refID = None
server_uuid = None
def __init__(self, id=None, parentID=None, title=None, restricted=False,
creator=None):
self.id = id
self.parentID = parentID
self.title = title
self.creator = creator
self.restricted = restricted
self.res = Resources()
def checkUpdate(self):
return self
def toElement(self,**kwargs):
root = ET.Element(self.elementName)
#if self.id == 1000:
# root.attrib['id'] = '0'
# ET.SubElement(root, 'dc:title').text = 'root'
#else:
# root.attrib['id'] = str(self.id)
# ET.SubElement(root, 'dc:title').text = self.title
root.attrib['id'] = str(self.id)
ET.SubElement(root, qname('title',DC_NS)).text = self.title
#if self.title != None:
# ET.SubElement(root, 'dc:title').text = self.title
#else:
# ET.SubElement(root, 'dc:title').text = 'root'
root.attrib['parentID'] = str(self.parentID)
if(kwargs.get('upnp_client','') != 'XBox'):
if self.refID:
root.attrib['refID'] = str(self.refID)
if kwargs.get('requested_id',None):
if kwargs.get('requested_id') == '0':
t = root.find(qname('title',DC_NS))
t.text = 'root'
#if kwargs.get('requested_id') != '0' and kwargs.get('requested_id') != root.attrib['id']:
if kwargs.get('requested_id') != root.attrib['id']:
if(kwargs.get('upnp_client','') != 'XBox'):
root.attrib['refID'] = root.attrib['id']
r_id = kwargs.get('requested_id')
root.attrib['id'] = r_id
r_id = r_id.split('@',1)
try:
root.attrib['parentID'] = r_id[1]
except IndexError:
pass
if(kwargs.get('upnp_client','') != 'XBox'):
self.info("Changing ID from %r to %r, with parentID %r", root.attrib['refID'], root.attrib['id'], root.attrib['parentID'])
else:
self.info("Changing ID from %r to %r, with parentID %r", self.id, root.attrib['id'], root.attrib['parentID'])
elif kwargs.get('parent_container',None):
if(kwargs.get('parent_container') != '0' and
kwargs.get('parent_container') != root.attrib['parentID']):
if(kwargs.get('upnp_client','') != 'XBox'):
root.attrib['refID'] = root.attrib['id']
root.attrib['id'] = '@'.join((root.attrib['id'],kwargs.get('parent_container')))
root.attrib['parentID'] = kwargs.get('parent_container')
if(kwargs.get('upnp_client','') != 'XBox'):
self.info("Changing ID from %r to %r, with parentID from %r to %r", root.attrib['refID'], root.attrib['id'], self.parentID, root.attrib['parentID'])
else:
self.info("Changing ID from %r to %r, with parentID from %r to %r", self.id, root.attrib['id'], self.parentID, root.attrib['parentID'])
ET.SubElement(root, qname('class',UPNP_NS)).text = self.upnp_class
if kwargs.get('upnp_client','') == 'XBox':
u = root.find(qname('class',UPNP_NS))
if(kwargs.get('parent_container',None) != None and
u.text.startswith('object.container')):
if kwargs.get('parent_container') in ('14','15','16'):
u.text = 'object.container.storageFolder'
if self.upnp_class == 'object.container':
u.text = 'object.container.storageFolder'
if self.restricted:
root.attrib['restricted'] = '1'
else:
root.attrib['restricted'] = '0'
if self.creator is not None:
ET.SubElement(root, qname('creator',DC_NS)).text = self.creator
if self.writeStatus is not None:
ET.SubElement(root, qname('writeStatus',UPNP_NS)).text = self.writeStatus
if self.date is not None:
if isinstance(self.date, datetime):
ET.SubElement(root, qname('date',DC_NS)).text = self.date.isoformat()
else:
ET.SubElement(root, qname('date',DC_NS)).text = self.date
else:
ET.SubElement(root, qname('date',DC_NS)).text = utils.datefaker().isoformat()
if self.albumArtURI is not None:
e = ET.SubElement(root, qname('albumArtURI',UPNP_NS))
e.text = self.albumArtURI
e.attrib['xmlns:dlna'] = 'urn:schemas-dlna-org:metadata-1-0'
e.attrib['dlna:profileID'] = 'JPEG_TN'
if self.artist is not None:
ET.SubElement(root, qname('artist',UPNP_NS)).text = self.artist
if self.originalTrackNumber is not None:
ET.SubElement(root, qname('originalTrackNumber',UPNP_NS)).text = str(self.originalTrackNumber)
if self.description is not None:
ET.SubElement(root, qname('description',DC_NS)).text = self.description
if self.longDescription is not None:
ET.SubElement(root, qname('longDescription',UPNP_NS)).text = self.longDescription
if self.server_uuid is not None:
ET.SubElement(root, qname('server_uuid',UPNP_NS)).text = self.server_uuid
return root
def toString(self,**kwargs):
return ET.tostring(self.toElement(**kwargs),encoding='utf-8')
def fromElement(self, elt):
"""
TODO:
* creator
* writeStatus
"""
self.elementName = elt.tag
self.id = elt.attrib.get('id',None)
self.parentID = elt.attrib.get('parentID',None)
self.refID = elt.attrib.get('refID',None)
if elt.attrib.get('restricted',None) in [1,'true','True','1','yes','Yes']:
self.restricted = True
else:
self.restricted = False
for child in elt.getchildren():
if child.tag.endswith('title'):
self.title = child.text
elif child.tag.endswith('albumArtURI'):
self.albumArtURI = child.text
elif child.tag.endswith('originalTrackNumber'):
self.originalTrackNumber = int(child.text)
elif child.tag.endswith('description'):
self.description = child.text
elif child.tag.endswith('longDescription'):
self.longDescription = child.text
elif child.tag.endswith('artist'):
self.artist = child.text
elif child.tag.endswith('album'):
self.album = child.text
elif child.tag.endswith('class'):
self.upnp_class = child.text
elif child.tag.endswith('server_uuid'):
self.server_uuid = child.text
elif child.tag.endswith('res'):
res = Resource.fromString(ET.tostring(child))
self.res.append(res)
@classmethod
def fromString(cls, data):
instance = cls()
elt = utils.parse_xml(data)
#elt = ElementTree(elt)
instance.fromElement(elt.getroot())
return instance
class Item(Object):
"""A class used to represent atomic (non-container) content
objects."""
upnp_class = Object.upnp_class + '.item'
elementName = 'item'
refID = None
director = None
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
def toElement(self,**kwargs):
root = Object.toElement(self,**kwargs)
if self.director is not None:
ET.SubElement(root, qname('director',UPNP_NS)).text = self.director
if self.refID is not None:
ET.SubElement(root, 'refID').text = self.refID
if kwargs.get('transcoding',False) == True:
res = self.res.get_matching(['*:*:*:*'], protocol_type='http-get')
if len(res) > 0 and is_audio(res[0].protocolInfo):
old_res = res[0]
if(kwargs.get('upnp_client','') == 'XBox'):
transcoded_res = old_res.transcoded('mp3')
if transcoded_res != None:
root.append(transcoded_res.toElement(**kwargs))
else:
root.append(old_res.toElement(**kwargs))
else:
for res in self.res:
root.append(res.toElement(**kwargs))
transcoded_res = old_res.transcoded('lpcm')
if transcoded_res != None:
root.append(transcoded_res.toElement(**kwargs))
else:
for res in self.res:
root.append(res.toElement(**kwargs))
else:
for res in self.res:
root.append(res.toElement(**kwargs))
return root
def fromElement(self, elt):
Object.fromElement(self, elt)
for child in elt.getchildren():
if child.tag.endswith('refID'):
self.refID = child.text
elif child.tag.endswith('director'):
self.director = child.text
class ImageItem(Item):
upnp_class = Item.upnp_class + '.imageItem'
rating = None
storageMedium = None
publisher = None
rights = None
def toElement(self,**kwargs):
root = Item.toElement(self,**kwargs)
if self.rating is not None:
ET.SubElement(root, qname('rating',UPNP_NS)).text = str(self.rating)
if self.storageMedium is not None:
ET.SubElement(root, qname('storageMedium',UPNP_NS)).text = self.storageMedium
if self.publisher is not None:
            ET.SubElement(root, qname('publisher',DC_NS)).text = self.publisher
if self.rights is not None:
ET.SubElement(root, qname('rights',DC_NS)).text = self.rights
return root
class Photo(ImageItem):
upnp_class = ImageItem.upnp_class + '.photo'
album = None
def toElement(self,**kwargs):
root = ImageItem.toElement(self,**kwargs)
if self.album is not None:
ET.SubElement(root, qname('album',UPNP_NS)).text = self.album
return root
class AudioItem(Item):
"""A piece of content that when rendered generates some audio."""
upnp_class = Item.upnp_class + '.audioItem'
genre = None
publisher = None
language = None
relation = None
rights = None
valid_keys = ['genre', 'description', 'longDescription', 'publisher',
'language', 'relation', 'rights', 'albumArtURI']
#@dlna.AudioItem
def toElement(self,**kwargs):
root = Item.toElement(self,**kwargs)
if self.genre is not None:
ET.SubElement(root, qname('genre',UPNP_NS)).text = self.genre
if self.publisher is not None:
ET.SubElement(root, qname('publisher',DC_NS)).text = self.publisher
if self.language is not None:
ET.SubElement(root, qname('language',DC_NS)).text = self.language
if self.relation is not None:
ET.SubElement(root, qname('relation',DC_NS)).text = self.relation
if self.rights is not None:
ET.SubElement(root, qname('rights',DC_NS)).text = self.rights
return root
def fromElement(self, elt):
Item.fromElement(self, elt)
for child in elt.getchildren():
tag = child.tag
val = child.text
if tag in self.valid_keys:
setattr(self, tag, val)
class MusicTrack(AudioItem):
"""A discrete piece of audio that should be interpreted as music."""
upnp_class = AudioItem.upnp_class + '.musicTrack'
album = None
playlist = None
storageMedium = None
contributor = None
def toElement(self,**kwargs):
root = AudioItem.toElement(self,**kwargs)
if self.album is not None:
ET.SubElement(root, qname('album',UPNP_NS)).text = self.album
if self.playlist is not None:
ET.SubElement(root, qname('playlist',UPNP_NS)).text = self.playlist
if self.storageMedium is not None:
ET.SubElement(root, qname('storageMedium',UPNP_NS)).text = self.storageMedium
if self.contributor is not None:
ET.SubElement(root, qname('contributor',DC_NS)).text = self.contributor
return root
class AudioBroadcast(AudioItem):
upnp_class = AudioItem.upnp_class + '.audioBroadcast'
class AudioBook(AudioItem):
upnp_class = AudioItem.upnp_class + '.audioBook'
class VideoItem(Item):
upnp_class = Item.upnp_class + '.videoItem'
class Movie(VideoItem):
upnp_class = VideoItem.upnp_class + '.movie'
class VideoBroadcast(VideoItem):
upnp_class = VideoItem.upnp_class + '.videoBroadcast'
class MusicVideoClip(VideoItem):
upnp_class = VideoItem.upnp_class + '.musicVideoClip'
class PlaylistItem(Item):
upnp_class = Item.upnp_class + '.playlistItem'
class TextItem(Item):
upnp_class = Item.upnp_class + '.textItem'
class Container(Object):
"""An object that can contain other objects."""
upnp_class = Object.upnp_class + '.container'
elementName = 'container'
childCount = None
createClass = None
searchable = None
def __init__(self, id=None, parentID=None, title=None,
restricted = False, creator = None):
Object.__init__(self, id, parentID, title, restricted, creator)
self.searchClass = []
def toElement(self,**kwargs):
root = Object.toElement(self,**kwargs)
if self.childCount is not None:
root.attrib['childCount'] = str(self.childCount)
if self.createClass is not None:
ET.SubElement(root, qname('createclass',UPNP_NS)).text = self.createClass
if not isinstance(self.searchClass, (list, tuple)):
self.searchClass = [self.searchClass]
for i in self.searchClass:
sc = ET.SubElement(root, qname('searchClass',UPNP_NS))
sc.attrib['includeDerived'] = '1'
sc.text = i
if self.searchable is not None:
if self.searchable in (1, '1', True, 'true', 'True'):
root.attrib['searchable'] = '1'
else:
root.attrib['searchable'] = '0'
for res in self.res:
root.append(res.toElement(**kwargs))
return root
def fromElement(self, elt):
Object.fromElement(self, elt)
v = elt.attrib.get('childCount',None)
if v is not None:
self.childCount = int(v)
#self.searchable = int(elt.attrib.get('searchable','0'))
self.searchable = elt.attrib.get('searchable','0') in [1,'True','true','1']
self.searchClass = []
for child in elt.getchildren():
if child.tag.endswith('createclass'):
self.createClass = child.text
elif child.tag.endswith('searchClass'):
self.searchClass.append(child.text)
class Person(Container):
upnp_class = Container.upnp_class + '.person'
class MusicArtist(Person):
upnp_class = Person.upnp_class + '.musicArtist'
class PlaylistContainer(Container):
upnp_class = Container.upnp_class + '.playlistContainer'
class Album(Container):
upnp_class = Container.upnp_class + '.album'
class MusicAlbum(Album):
upnp_class = Album.upnp_class + '.musicAlbum'
class PhotoAlbum(Album):
upnp_class = Album.upnp_class + '.photoAlbum'
class Genre(Container):
upnp_class = Container.upnp_class + '.genre'
class MusicGenre(Genre):
upnp_class = Genre.upnp_class + '.musicGenre'
class MovieGenre(Genre):
upnp_class = Genre.upnp_class + '.movieGenre'
class StorageSystem(Container):
upnp_class = Container.upnp_class + '.storageSystem'
class StorageVolume(Container):
upnp_class = Container.upnp_class + '.storageVolume'
class StorageFolder(Container):
upnp_class = Container.upnp_class + '.storageFolder'
class DIDLElement(ElementInterface,log.Loggable):
logCategory = 'didllite'
def __init__(self, upnp_client='',
parent_container=None,requested_id=None,
transcoding=False):
ElementInterface.__init__(self, 'DIDL-Lite', {})
self.attrib['xmlns'] = 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'
self.attrib['xmlns:dc'] = 'http://purl.org/dc/elements/1.1/'
self.attrib['xmlns:upnp'] = 'urn:schemas-upnp-org:metadata-1-0/upnp/'
self.attrib['xmlns:dlna'] = 'urn:schemas-dlna-org:metadata-1-0'
self.attrib['xmlns:pv'] = 'http://www.pv.com/pvns/'
self._items = []
self.upnp_client = upnp_client
self.parent_container = parent_container
self.requested_id = requested_id
self.transcoding = transcoding
def addContainer(self, id, parentID, title, restricted = False):
e = Container(id, parentID, title, restricted, creator = '')
self.append(e.toElement())
def addItem(self, item):
self.append(item.toElement(upnp_client=self.upnp_client,
parent_container=self.parent_container,
requested_id=self.requested_id,
transcoding=self.transcoding))
self._items.append(item)
def numItems(self):
return len(self)
def getItems(self):
return self._items
def toString(self):
""" sigh - having that optional preamble here
breaks some of the older ContentDirectoryClients
"""
#preamble = """<?xml version="1.0" encoding="utf-8"?>"""
#return preamble + ET.tostring(self,encoding='utf-8')
return ET.tostring(self,encoding='utf-8')
def get_upnp_class(self,name):
try:
return upnp_classes[name]()
except KeyError:
self.warning("upnp_class %r not found, trying fallback", name)
parts = name.split('.')
parts.pop()
while len(parts) > 1:
try:
return upnp_classes['.'.join(parts)]()
except KeyError:
parts.pop()
self.warning("WTF - no fallback for upnp_class %r found ?!?", name)
return None
@classmethod
def fromString(cls, aString):
instance = cls()
elt = utils.parse_xml(aString, 'utf-8')
elt = elt.getroot()
for node in elt.getchildren():
upnp_class_name = node.findtext('{%s}class' % 'urn:schemas-upnp-org:metadata-1-0/upnp/')
upnp_class = instance.get_upnp_class(upnp_class_name.strip())
new_node = upnp_class.fromString(ET.tostring(node))
instance.addItem(new_node)
return instance
def element_to_didl(item):
""" a helper method to create a DIDLElement out of one ET element
or XML fragment string
"""
if not isinstance(item,basestring):
item = ET.tostring(item)
didl = """<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:dlna="urn:schemas-dlna-org:metadata-1-0"
xmlns:pv="http://www.pv.com/pvns/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">""" \
+ item + \
"""</DIDL-Lite>"""
return didl
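# Hypothetical usage sketch for the helper above (the fragment content is
# illustrative only): wrap a single item fragment in a DIDL-Lite envelope and
# parse it back into a DIDLElement.
#
#     fragment = ('<item id="1" parentID="0" restricted="0">'
#                 '<dc:title>Example</dc:title>'
#                 '<upnp:class>object.item</upnp:class>'
#                 '</item>')
#     didl = DIDLElement.fromString(element_to_didl(fragment))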
upnp_classes = {'object': Object,
'object.item': Item,
'object.item.imageItem': ImageItem,
'object.item.imageItem.photo': Photo,
'object.item.audioItem': AudioItem,
'object.item.audioItem.musicTrack': MusicTrack,
'object.item.audioItem.audioBroadcast': AudioBroadcast,
'object.item.audioItem.audioBook': AudioBook,
'object.item.videoItem': VideoItem,
'object.item.videoItem.movie': Movie,
'object.item.videoItem.videoBroadcast': VideoBroadcast,
'object.item.videoItem.musicVideoClip': MusicVideoClip,
'object.item.playlistItem': PlaylistItem,
'object.item.textItem': TextItem,
'object.container': Container,
'object.container.person': Person,
'object.container.person.musicArtist': MusicArtist,
'object.container.playlistContainer': PlaylistContainer,
'object.container.album': Album,
'object.container.album.musicAlbum': MusicAlbum,
'object.container.album.photoAlbum': PhotoAlbum,
'object.container.genre': Genre,
'object.container.genre.musicGenre': MusicGenre,
'object.container.genre.movieGenre': MovieGenre,
'object.container.storageSystem': StorageSystem,
'object.container.storageVolume': StorageVolume,
'object.container.storageFolder': StorageFolder,
}
if __name__ == '__main__':
res = Resources()
res.append(Resource('1','file:*:*:*'))
res.append(Resource('2','rtsp-rtp-udp:*:*:*'))
res.append(Resource('3',None))
res.append(Resource('4','internal:*:*:*'))
res.append(Resource('5','http-get:*:*:*'))
res.append(Resource('6','something:*:*:*'))
res.append(Resource('7','http-get:*:*:*'))
for r in res:
print r.data, r.protocolInfo
|
mit
|
miroag/mfs
|
ci/appveyor-download.py
|
1
|
3807
|
#!/usr/bin/env python
"""
Use the AppVeyor API to download Windows artifacts.
Taken from: https://bitbucket.org/ned/coveragepy/src/tip/ci/download_appveyor.py
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""
from __future__ import unicode_literals
import argparse
import os
import zipfile
import requests
def make_auth_headers():
"""Make the authentication headers needed to use the Appveyor API."""
path = os.path.expanduser("~/.appveyor.token")
if not os.path.exists(path):
raise RuntimeError(
"Please create a file named `.appveyor.token` in your home directory. "
"You can get the token from https://ci.appveyor.com/api-token"
)
with open(path) as f:
token = f.read().strip()
headers = {
'Authorization': 'Bearer {}'.format(token),
}
return headers
def download_latest_artifacts(account_project, build_id):
"""Download all the artifacts from the latest build."""
if build_id is None:
url = "https://ci.appveyor.com/api/projects/{}".format(account_project)
else:
url = "https://ci.appveyor.com/api/projects/{}/build/{}".format(account_project, build_id)
build = requests.get(url, headers=make_auth_headers()).json()
jobs = build['build']['jobs']
print(u"Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs)))
for job in jobs:
name = job['name']
print(u" {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts".format(job['jobId'])
response = requests.get(url, headers=make_auth_headers())
artifacts = response.json()
for artifact in artifacts:
is_zip = artifact['type'] == "Zip"
filename = artifact['fileName']
print(u" {0}, {1} bytes".format(filename, artifact['size']))
url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts/{}".format(job['jobId'], filename)
download_url(url, filename, make_auth_headers())
if is_zip:
unpack_zipfile(filename)
os.remove(filename)
def ensure_dirs(filename):
"""Make sure the directories exist for `filename`."""
dirname = os.path.dirname(filename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
def download_url(url, filename, headers):
"""Download a file from `url` to `filename`."""
ensure_dirs(filename)
response = requests.get(url, headers=headers, stream=True)
if response.status_code == 200:
with open(filename, 'wb') as f:
for chunk in response.iter_content(16 * 1024):
f.write(chunk)
else:
print(u" Error downloading {}: {}".format(url, response))
def unpack_zipfile(filename):
"""Unpack a zipfile, using the names in the zip."""
with open(filename, 'rb') as fzip:
z = zipfile.ZipFile(fzip)
for name in z.namelist():
print(u" extracting {}".format(name))
ensure_dirs(name)
z.extract(name)
parser = argparse.ArgumentParser(description='Download artifacts from AppVeyor.')
parser.add_argument('--id',
metavar='PROJECT_ID',
default='miroag/mfs',
help='Project ID in AppVeyor.')
parser.add_argument('build',
nargs='?',
metavar='BUILD_ID',
help='Build ID in AppVeyor. Eg: master-123')
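# Example invocations (hypothetical; a valid ~/.appveyor.token must exist):
#
#     python ci/appveyor-download.py                        # latest build of the default project
#     python ci/appveyor-download.py --id owner/project master-123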
if __name__ == "__main__":
# import logging
# logging.basicConfig(level="DEBUG")
args = parser.parse_args()
download_latest_artifacts(args.id, args.build)
|
mit
|
liaoch/linux-xlnx
|
scripts/checkkconfigsymbols.py
|
216
|
4661
|
#!/usr/bin/env python
"""Find Kconfig identifiers that are referenced but not defined."""
# (c) 2014 Valentin Rothberg <[email protected]>
# (c) 2014 Stefan Hengelein <[email protected]>
#
# Licensed under the terms of the GNU GPL License version 2
import os
import re
from subprocess import Popen, PIPE, STDOUT
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
STMT = r"^\s*(?:if|select|depends\s+on)\s+" + EXPR
SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
REGEX_FEATURE = re.compile(r"(" + FEATURE + r")")
REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
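# Illustrative (made-up) examples of lines the expressions above should match:
#
#   REGEX_KCONFIG_DEF    : "config HOTPLUG_CPU", "menuconfig MODULES"
#   REGEX_KCONFIG_STMT   : "depends on SMP && HOTPLUG_CPU", "select NUMA_BALANCING"
#   REGEX_SOURCE_FEATURE : "#ifdef CONFIG_HOTPLUG_CPU"  (captures "HOTPLUG_CPU")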
def main():
"""Main function of this module."""
source_files = []
kconfig_files = []
defined_features = set()
referenced_features = dict() # {feature: [files]}
# use 'git ls-files' to get the worklist
pop = Popen("git ls-files", stdout=PIPE, stderr=STDOUT, shell=True)
(stdout, _) = pop.communicate() # wait until finished
if len(stdout) > 0 and stdout[-1] == "\n":
stdout = stdout[:-1]
for gitfile in stdout.rsplit("\n"):
if ".git" in gitfile or "ChangeLog" in gitfile or \
".log" in gitfile or os.path.isdir(gitfile):
continue
if REGEX_FILE_KCONFIG.match(gitfile):
kconfig_files.append(gitfile)
else:
# all non-Kconfig files are checked for consistency
source_files.append(gitfile)
for sfile in source_files:
parse_source_file(sfile, referenced_features)
for kfile in kconfig_files:
parse_kconfig_file(kfile, defined_features, referenced_features)
print "Undefined symbol used\tFile list"
for feature in sorted(referenced_features):
# filter some false positives
if feature == "FOO" or feature == "BAR" or \
feature == "FOO_BAR" or feature == "XXX":
continue
if feature not in defined_features:
if feature.endswith("_MODULE"):
# avoid false positives for kernel modules
if feature[:-len("_MODULE")] in defined_features:
continue
files = referenced_features.get(feature)
print "%s\t%s" % (feature, ", ".join(files))
def parse_source_file(sfile, referenced_features):
"""Parse @sfile for referenced Kconfig features."""
lines = []
with open(sfile, "r") as stream:
lines = stream.readlines()
for line in lines:
if not "CONFIG_" in line:
continue
features = REGEX_SOURCE_FEATURE.findall(line)
for feature in features:
if not REGEX_FILTER_FEATURES.search(feature):
continue
sfiles = referenced_features.get(feature, set())
sfiles.add(sfile)
referenced_features[feature] = sfiles
def get_features_in_line(line):
"""Return mentioned Kconfig features in @line."""
return REGEX_FEATURE.findall(line)
def parse_kconfig_file(kfile, defined_features, referenced_features):
"""Parse @kfile and update feature definitions and references."""
lines = []
skip = False
with open(kfile, "r") as stream:
lines = stream.readlines()
for i in range(len(lines)):
line = lines[i]
line = line.strip('\n')
line = line.split("#")[0] # ignore comments
if REGEX_KCONFIG_DEF.match(line):
feature_def = REGEX_KCONFIG_DEF.findall(line)
defined_features.add(feature_def[0])
skip = False
elif REGEX_KCONFIG_HELP.match(line):
skip = True
elif skip:
# ignore content of help messages
pass
elif REGEX_KCONFIG_STMT.match(line):
features = get_features_in_line(line)
# multi-line statements
while line.endswith("\\"):
i += 1
line = lines[i]
line = line.strip('\n')
features.extend(get_features_in_line(line))
for feature in set(features):
paths = referenced_features.get(feature, set())
paths.add(kfile)
referenced_features[feature] = paths
if __name__ == "__main__":
main()
|
gpl-2.0
|
andreparrish/python-for-android
|
python-modules/twisted/twisted/web/xmlrpc.py
|
49
|
19640
|
# -*- test-case-name: twisted.web.test.test_xmlrpc -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A generic resource for publishing objects via XML-RPC.
Maintainer: Itamar Shtull-Trauring
"""
# System Imports
import sys, xmlrpclib, urlparse
# Sibling Imports
from twisted.web import resource, server, http
from twisted.internet import defer, protocol, reactor
from twisted.python import log, reflect, failure
# These are deprecated, use the class level definitions
NOT_FOUND = 8001
FAILURE = 8002
# Useful so people don't need to import xmlrpclib directly
Fault = xmlrpclib.Fault
Binary = xmlrpclib.Binary
Boolean = xmlrpclib.Boolean
DateTime = xmlrpclib.DateTime
# On Python 2.4 and earlier, DateTime.decode returns unicode.
if sys.version_info[:2] < (2, 5):
_decode = DateTime.decode
DateTime.decode = lambda self, value: _decode(self, value.encode('ascii'))
def withRequest(f, *args, **kwargs):
"""
Decorator to cause the request to be passed as the first argument
to the method.
If an I{xmlrpc_} method is wrapped with C{withRequest}, the
request object is passed as the first argument to that method.
For example::
@withRequest
def xmlrpc_echo(self, request, s):
return s
"""
f.withRequest = True
return f
class NoSuchFunction(Fault):
"""
There is no function by the given name.
"""
class Handler:
"""
    Handle an XML-RPC request and store the state for a request in progress.
Override the run() method and return result using self.result,
a Deferred.
We require this class since we're not using threads, so we can't
encapsulate state in a running function if we're going to have
to wait for results.
    For example, let's say we want to authenticate against twisted.cred,
    run an LDAP query and then pass its result to a database query, all
as a result of a single XML-RPC command. We'd use a Handler instance
to store the state of the running command.
"""
def __init__(self, resource, *args):
self.resource = resource # the XML-RPC resource we are connected to
self.result = defer.Deferred()
self.run(*args)
def run(self, *args):
# event driven equivalent of 'raise UnimplementedError'
self.result.errback(
NotImplementedError("Implement run() in subclasses"))
class XMLRPC(resource.Resource):
"""
A resource that implements XML-RPC.
You probably want to connect this to '/RPC2'.
Methods published can return XML-RPC serializable results, Faults,
Binary, Boolean, DateTime, Deferreds, or Handler instances.
By default methods beginning with 'xmlrpc_' are published.
Sub-handlers for prefixed methods (e.g., system.listMethods)
can be added with putSubHandler. By default, prefixes are
separated with a '.'. Override self.separator to change this.
@ivar allowNone: Permit XML translating of Python constant None.
@type allowNone: C{bool}
@ivar useDateTime: Present datetime values as datetime.datetime objects?
Requires Python >= 2.5.
@type useDateTime: C{bool}
"""
# Error codes for Twisted, if they conflict with yours then
# modify them at runtime.
NOT_FOUND = 8001
FAILURE = 8002
isLeaf = 1
separator = '.'
allowedMethods = ('POST',)
def __init__(self, allowNone=False, useDateTime=False):
resource.Resource.__init__(self)
self.subHandlers = {}
self.allowNone = allowNone
self.useDateTime = useDateTime
def __setattr__(self, name, value):
if name == "useDateTime" and value and sys.version_info[:2] < (2, 5):
raise RuntimeError("useDateTime requires Python 2.5 or later.")
self.__dict__[name] = value
def putSubHandler(self, prefix, handler):
self.subHandlers[prefix] = handler
def getSubHandler(self, prefix):
return self.subHandlers.get(prefix, None)
def getSubHandlerPrefixes(self):
return self.subHandlers.keys()
def render_POST(self, request):
request.content.seek(0, 0)
request.setHeader("content-type", "text/xml")
try:
if self.useDateTime:
args, functionPath = xmlrpclib.loads(request.content.read(),
use_datetime=True)
else:
# Maintain backwards compatibility with Python < 2.5
args, functionPath = xmlrpclib.loads(request.content.read())
except Exception, e:
f = Fault(self.FAILURE, "Can't deserialize input: %s" % (e,))
self._cbRender(f, request)
else:
try:
function = self._getFunction(functionPath)
except Fault, f:
self._cbRender(f, request)
else:
# Use this list to track whether the response has failed or not.
# This will be used later on to decide if the result of the
# Deferred should be written out and Request.finish called.
responseFailed = []
request.notifyFinish().addErrback(responseFailed.append)
if getattr(function, 'withRequest', False):
d = defer.maybeDeferred(function, request, *args)
else:
d = defer.maybeDeferred(function, *args)
d.addErrback(self._ebRender)
d.addCallback(self._cbRender, request, responseFailed)
return server.NOT_DONE_YET
def _cbRender(self, result, request, responseFailed=None):
if responseFailed:
return
if isinstance(result, Handler):
result = result.result
if not isinstance(result, Fault):
result = (result,)
try:
try:
content = xmlrpclib.dumps(
result, methodresponse=True,
allow_none=self.allowNone)
except Exception, e:
f = Fault(self.FAILURE, "Can't serialize output: %s" % (e,))
content = xmlrpclib.dumps(f, methodresponse=True,
allow_none=self.allowNone)
request.setHeader("content-length", str(len(content)))
request.write(content)
except:
log.err()
request.finish()
def _ebRender(self, failure):
if isinstance(failure.value, Fault):
return failure.value
log.err(failure)
return Fault(self.FAILURE, "error")
def _getFunction(self, functionPath):
"""
Given a string, return a function, or raise NoSuchFunction.
This returned function will be called, and should return the result
of the call, a Deferred, or a Fault instance.
Override in subclasses if you want your own policy. The default
policy is that given functionPath 'foo', return the method at
self.xmlrpc_foo, i.e. getattr(self, "xmlrpc_" + functionPath).
If functionPath contains self.separator, the sub-handler for
the initial prefix is used to search for the remaining path.
"""
if functionPath.find(self.separator) != -1:
prefix, functionPath = functionPath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None:
raise NoSuchFunction(self.NOT_FOUND,
"no such subHandler %s" % prefix)
return handler._getFunction(functionPath)
f = getattr(self, "xmlrpc_%s" % functionPath, None)
if not f:
raise NoSuchFunction(self.NOT_FOUND,
"function %s not found" % functionPath)
elif not callable(f):
raise NoSuchFunction(self.NOT_FOUND,
"function %s not callable" % functionPath)
else:
return f
def _listFunctions(self):
"""
Return a list of the names of all xmlrpc methods.
"""
return reflect.prefixedMethodNames(self.__class__, 'xmlrpc_')
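# Minimal, hypothetical sketch of publishing a method with this resource (the
# class name and port are illustrative, not part of this module):
#
#     class Example(XMLRPC):
#         def xmlrpc_add(self, a, b):
#             """Return the sum of the two arguments."""
#             return a + b
#
#     reactor.listenTCP(7080, server.Site(Example()))
#     reactor.run()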
class XMLRPCIntrospection(XMLRPC):
"""
Implement the XML-RPC Introspection API.
By default, the methodHelp method returns the 'help' method attribute,
if it exists, otherwise the __doc__ method attribute, if it exists,
otherwise the empty string.
To enable the methodSignature method, add a 'signature' method attribute
containing a list of lists. See methodSignature's documentation for the
format. Note the type strings should be XML-RPC types, not Python types.
"""
def __init__(self, parent):
"""
Implement Introspection support for an XMLRPC server.
@param parent: the XMLRPC server to add Introspection support to.
@type parent: L{XMLRPC}
"""
XMLRPC.__init__(self)
self._xmlrpc_parent = parent
def xmlrpc_listMethods(self):
"""
Return a list of the method names implemented by this server.
"""
functions = []
todo = [(self._xmlrpc_parent, '')]
while todo:
obj, prefix = todo.pop(0)
functions.extend([prefix + name for name in obj._listFunctions()])
todo.extend([ (obj.getSubHandler(name),
prefix + name + obj.separator)
for name in obj.getSubHandlerPrefixes() ])
return functions
xmlrpc_listMethods.signature = [['array']]
def xmlrpc_methodHelp(self, method):
"""
Return a documentation string describing the use of the given method.
"""
method = self._xmlrpc_parent._getFunction(method)
return (getattr(method, 'help', None)
or getattr(method, '__doc__', None) or '')
xmlrpc_methodHelp.signature = [['string', 'string']]
def xmlrpc_methodSignature(self, method):
"""
Return a list of type signatures.
Each type signature is a list of the form [rtype, type1, type2, ...]
where rtype is the return type and typeN is the type of the Nth
argument. If no signature information is available, the empty
string is returned.
"""
method = self._xmlrpc_parent._getFunction(method)
return getattr(method, 'signature', None) or ''
xmlrpc_methodSignature.signature = [['array', 'string'],
['string', 'string']]
def addIntrospection(xmlrpc):
"""
Add Introspection support to an XMLRPC server.
    @param xmlrpc: the XMLRPC server to add Introspection support to.
    @type xmlrpc: L{XMLRPC}
"""
xmlrpc.putSubHandler('system', XMLRPCIntrospection(xmlrpc))
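# Hypothetical sketch of enabling introspection and declaring a signature for
# a published method (the names below are illustrative):
#
#     class Example(XMLRPC):
#         def xmlrpc_add(self, a, b):
#             """Return the sum of two integers."""
#             return a + b
#         xmlrpc_add.signature = [['int', 'int', 'int']]
#
#     root = Example()
#     addIntrospection(root)   # adds the system.* introspection methods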
class QueryProtocol(http.HTTPClient):
def connectionMade(self):
self._response = None
self.sendCommand('POST', self.factory.path)
self.sendHeader('User-Agent', 'Twisted/XMLRPClib')
self.sendHeader('Host', self.factory.host)
self.sendHeader('Content-type', 'text/xml')
self.sendHeader('Content-length', str(len(self.factory.payload)))
if self.factory.user:
auth = '%s:%s' % (self.factory.user, self.factory.password)
auth = auth.encode('base64').strip()
self.sendHeader('Authorization', 'Basic %s' % (auth,))
self.endHeaders()
self.transport.write(self.factory.payload)
def handleStatus(self, version, status, message):
if status != '200':
self.factory.badStatus(status, message)
def handleResponse(self, contents):
"""
Handle the XML-RPC response received from the server.
Specifically, disconnect from the server and store the XML-RPC
response so that it can be properly handled when the disconnect is
finished.
"""
self.transport.loseConnection()
self._response = contents
def connectionLost(self, reason):
"""
The connection to the server has been lost.
        If we have a full response from the server, then parse it and fire a
Deferred with the return value or C{Fault} that the server gave us.
"""
http.HTTPClient.connectionLost(self, reason)
if self._response is not None:
response, self._response = self._response, None
self.factory.parseResponse(response)
payloadTemplate = """<?xml version="1.0"?>
<methodCall>
<methodName>%s</methodName>
%s
</methodCall>
"""
class _QueryFactory(protocol.ClientFactory):
"""
XML-RPC Client Factory
@ivar path: The path portion of the URL to which to post method calls.
@type path: C{str}
@ivar host: The value to use for the Host HTTP header.
@type host: C{str}
@ivar user: The username with which to authenticate with the server
when making calls.
@type user: C{str} or C{NoneType}
@ivar password: The password with which to authenticate with the server
when making calls.
@type password: C{str} or C{NoneType}
@ivar useDateTime: Accept datetime values as datetime.datetime objects.
        Also passed to the underlying xmlrpclib implementation. Defaults to
        False. Requires Python >= 2.5.
@type useDateTime: C{bool}
"""
deferred = None
protocol = QueryProtocol
def __init__(self, path, host, method, user=None, password=None,
allowNone=False, args=(), canceller=None, useDateTime=False):
"""
@param method: The name of the method to call.
@type method: C{str}
@param allowNone: allow the use of None values in parameters. It's
            passed to the underlying xmlrpclib implementation. Defaults to False.
@type allowNone: C{bool} or C{NoneType}
@param args: the arguments to pass to the method.
@type args: C{tuple}
@param canceller: A 1-argument callable passed to the deferred as the
canceller callback.
@type canceller: callable or C{NoneType}
"""
self.path, self.host = path, host
self.user, self.password = user, password
self.payload = payloadTemplate % (method,
xmlrpclib.dumps(args, allow_none=allowNone))
self.deferred = defer.Deferred(canceller)
self.useDateTime = useDateTime
def parseResponse(self, contents):
if not self.deferred:
return
try:
if self.useDateTime:
response = xmlrpclib.loads(contents,
use_datetime=True)[0][0]
else:
# Maintain backwards compatibility with Python < 2.5
response = xmlrpclib.loads(contents)[0][0]
except:
deferred, self.deferred = self.deferred, None
deferred.errback(failure.Failure())
else:
deferred, self.deferred = self.deferred, None
deferred.callback(response)
def clientConnectionLost(self, _, reason):
if self.deferred is not None:
deferred, self.deferred = self.deferred, None
deferred.errback(reason)
clientConnectionFailed = clientConnectionLost
def badStatus(self, status, message):
deferred, self.deferred = self.deferred, None
deferred.errback(ValueError(status, message))
class Proxy:
"""
A Proxy for making remote XML-RPC calls.
Pass the URL of the remote XML-RPC server to the constructor.
Use proxy.callRemote('foobar', *args) to call remote method
'foobar' with *args.
@ivar user: The username with which to authenticate with the server
when making calls. If specified, overrides any username information
embedded in C{url}. If not specified, a value may be taken from
C{url} if present.
@type user: C{str} or C{NoneType}
@ivar password: The password with which to authenticate with the server
when making calls. If specified, overrides any password information
embedded in C{url}. If not specified, a value may be taken from
C{url} if present.
@type password: C{str} or C{NoneType}
@ivar allowNone: allow the use of None values in parameters. It's
        passed to the underlying xmlrpclib implementation. Defaults to False.
@type allowNone: C{bool} or C{NoneType}
@ivar useDateTime: Accept datetime values as datetime.datetime objects.
        Also passed to the underlying xmlrpclib implementation. Defaults to
        False. Requires Python >= 2.5.
@type useDateTime: C{bool}
@ivar queryFactory: object returning a factory for XML-RPC protocol. Mainly
useful for tests.
"""
queryFactory = _QueryFactory
def __init__(self, url, user=None, password=None, allowNone=False,
useDateTime=False):
"""
@param url: The URL to which to post method calls. Calls will be made
over SSL if the scheme is HTTPS. If netloc contains username or
password information, these will be used to authenticate, as long as
the C{user} and C{password} arguments are not specified.
@type url: C{str}
"""
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
netlocParts = netloc.split('@')
if len(netlocParts) == 2:
userpass = netlocParts.pop(0).split(':')
self.user = userpass.pop(0)
try:
self.password = userpass.pop(0)
except:
self.password = None
else:
self.user = self.password = None
hostport = netlocParts[0].split(':')
self.host = hostport.pop(0)
try:
self.port = int(hostport.pop(0))
except:
self.port = None
self.path = path
if self.path in ['', None]:
self.path = '/'
self.secure = (scheme == 'https')
if user is not None:
self.user = user
if password is not None:
self.password = password
self.allowNone = allowNone
self.useDateTime = useDateTime
def __setattr__(self, name, value):
if name == "useDateTime" and value and sys.version_info[:2] < (2, 5):
raise RuntimeError("useDateTime requires Python 2.5 or later.")
self.__dict__[name] = value
def callRemote(self, method, *args):
"""
Call remote XML-RPC C{method} with given arguments.
@return: a L{defer.Deferred} that will fire with the method response,
or a failure if the method failed. Generally, the failure type will
be L{Fault}, but you can also have an C{IndexError} on some buggy
servers giving empty responses.
If the deferred is cancelled before the request completes, the
connection is closed and the deferred will fire with a
L{defer.CancelledError}.
"""
def cancel(d):
factory.deferred = None
connector.disconnect()
factory = self.queryFactory(
self.path, self.host, method, self.user,
self.password, self.allowNone, args, cancel, self.useDateTime)
if self.secure:
from twisted.internet import ssl
connector = reactor.connectSSL(self.host, self.port or 443,
factory, ssl.ClientContextFactory())
else:
connector = reactor.connectTCP(self.host, self.port or 80, factory)
return factory.deferred
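# Hypothetical client-side sketch (the URL and method name are illustrative):
#
#     def printValue(value):
#         print value
#         reactor.stop()
#
#     proxy = Proxy('http://localhost:7080/RPC2')
#     proxy.callRemote('add', 3, 4).addCallbacks(printValue, log.err)
#     reactor.run()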
__all__ = [
"XMLRPC", "Handler", "NoSuchFunction", "Proxy",
"Fault", "Binary", "Boolean", "DateTime"]
|
apache-2.0
|
HolgerPeters/scikit-learn
|
examples/cluster/plot_digits_agglomeration.py
|
377
|
1694
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
|
bsd-3-clause
|
jerome-nexedi/dream
|
dream/simulation/JobMA.py
|
5
|
1587
|
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 06 May 2013
@author: Anna, George
'''
'''
Entity that is the MA in the DemandPlanning case. Inherits from Job (TODO see if this offers anything)
'''
from Job import Job
class JobMA(Job):
def __init__(self,orderID, MAid, SPid, PPOSid, qty, minQty, origWeek, future):
Job.__init__(self, id=MAid)
self.type = 'item'
self.orderID = orderID
self.MAid = MAid
self.SPid = SPid
self.PPOSid = PPOSid
self.qty = qty
self.minQty = minQty
self.originalWeek = origWeek
self.future = future # if 1 suggests that the MA belongs to the future demand (0 for the PPOS to be disaggregated)
self.weekPlan = self.originalWeek
|
gpl-3.0
|
ptisserand/ansible
|
lib/ansible/modules/database/mysql/mysql_variables.py
|
78
|
5632
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Balazs Pocze <[email protected]>
# Certain parts are taken from Mark Theunissen's mysqldb module
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mysql_variables
short_description: Manage MySQL global variables
description:
- Query / Set MySQL variables
version_added: 1.3
author: "Balazs Pocze (@banyek)"
options:
variable:
description:
- Variable name to operate
required: True
value:
description:
- If set, then sets variable value to this
required: False
extends_documentation_fragment: mysql
'''
EXAMPLES = '''
# Check for sync_binlog setting
- mysql_variables:
variable: sync_binlog
# Set read_only variable to 1
- mysql_variables:
variable: read_only
value: 1
'''
import os
import warnings
from re import match
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, mysql_quote_identifier
from ansible.module_utils.mysql import mysql_connect, mysqldb_found
from ansible.module_utils._text import to_native
def typedvalue(value):
"""
Convert value to number whenever possible, return same value
otherwise.
>>> typedvalue('3')
3
>>> typedvalue('3.0')
3.0
>>> typedvalue('foobar')
'foobar'
"""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
mysqlvar_val = cursor.fetchall()
    if len(mysqlvar_val) == 1:
return mysqlvar_val[0][1]
else:
return None
def setvariable(cursor, mysqlvar, value):
""" Set a global mysql variable to a given value
The DB driver will handle quoting of the given value based on its
    type, thus numeric strings like '3.0' or '8' are illegal; they
    should be passed as numeric literals.
"""
query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
try:
cursor.execute(query + "%s", (value,))
cursor.fetchall()
result = True
except Exception as e:
result = to_native(e)
return result
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
variable=dict(default=None),
value=dict(default=None),
ssl_cert=dict(default=None),
ssl_key=dict(default=None),
ssl_ca=dict(default=None),
connect_timeout=dict(default=30, type='int'),
config_file=dict(default="~/.my.cnf", type="path")
)
)
user = module.params["login_user"]
password = module.params["login_password"]
ssl_cert = module.params["ssl_cert"]
ssl_key = module.params["ssl_key"]
ssl_ca = module.params["ssl_ca"]
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
db = 'mysql'
mysqlvar = module.params["variable"]
value = module.params["value"]
if mysqlvar is None:
module.fail_json(msg="Cannot run without variable to operate with")
if match('^[0-9a-z_]+$', mysqlvar) is None:
module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
if not mysqldb_found:
module.fail_json(msg="The MySQL-python module is required.")
else:
warnings.filterwarnings('error', category=MySQLdb.Warning)
try:
cursor = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception as e:
if os.path.exists(config_file):
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
"Exception message: %s" % (config_file, to_native(e)))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, to_native(e)))
mysqlvar_val = getvariable(cursor, mysqlvar)
if mysqlvar_val is None:
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
if value is None:
module.exit_json(msg=mysqlvar_val)
else:
# Type values before using them
value_wanted = typedvalue(value)
value_actual = typedvalue(mysqlvar_val)
if value_wanted == value_actual:
module.exit_json(msg="Variable already set to requested value", changed=False)
try:
result = setvariable(cursor, mysqlvar, value_wanted)
except SQLParseError as e:
result = to_native(e)
if result is True:
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
else:
module.fail_json(msg=result, changed=False)
if __name__ == '__main__':
main()
|
gpl-3.0
|
indictranstech/internal-frappe
|
frappe/utils/user.py
|
1
|
8109
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _dict
import frappe.share
class User:
"""
	A user object is created at the beginning of every request with details of the user.
The global user object is `frappe.user`
"""
def __init__(self, name=''):
self.defaults = None
self.name = name or frappe.session.get('user')
self.roles = []
self.all_read = []
self.can_create = []
self.can_read = []
self.can_write = []
self.can_cancel = []
self.can_delete = []
self.can_search = []
self.can_get_report = []
self.can_import = []
self.can_export = []
self.can_print = []
self.can_email = []
self.can_set_user_permissions = []
self.allow_modules = []
self.in_create = []
if not frappe.flags.in_install_db and not frappe.flags.in_test:
try:
self.doc = frappe.get_doc("User", self.name)
except frappe.DoesNotExistError:
pass
except Exception, e:
# install boo-boo
if e.args[0] != 1146: raise
def get_roles(self):
"""get list of roles"""
if not self.roles:
self.roles = get_roles(self.name)
return self.roles
def get_block_modules(self):
"""Returns list of blocked modules"""
return [d.module for d in self.doc.block_modules] if self.doc.block_modules else []
def build_doctype_map(self):
"""build map of special doctype properties"""
self.doctype_map = {}
for r in frappe.db.sql("""select name, in_create, issingle, istable,
read_only, module from tabDocType""", as_dict=1):
self.doctype_map[r['name']] = r
def build_perm_map(self):
"""build map of permissions at level 0"""
self.perm_map = {}
roles = self.get_roles()
for r in frappe.db.sql("""select * from tabDocPerm where docstatus=0
and ifnull(permlevel,0)=0
and role in ({roles})""".format(roles=", ".join(["%s"]*len(roles))), tuple(roles), as_dict=1):
dt = r['parent']
if not dt in self.perm_map:
self.perm_map[dt] = {}
for k in frappe.permissions.rights:
if not self.perm_map[dt].get(k):
self.perm_map[dt][k] = r.get(k)
def build_permissions(self):
"""build lists of what the user can read / write / create
quirks:
read_only => Not in Search
in_create => Not in create
"""
self.build_doctype_map()
self.build_perm_map()
user_shared = frappe.share.get_shared_doctypes()
for dt in self.doctype_map:
dtp = self.doctype_map[dt]
p = self.perm_map.get(dt, {})
if not p.get("read") and (dt in user_shared):
p["read"] = 1
if not dtp.get('istable'):
if p.get('create') and not dtp.get('issingle'):
if dtp.get('in_create'):
self.in_create.append(dt)
else:
self.can_create.append(dt)
elif p.get('write'):
self.can_write.append(dt)
elif p.get('read'):
if dtp.get('read_only'):
self.all_read.append(dt)
else:
self.can_read.append(dt)
if p.get('cancel'):
self.can_cancel.append(dt)
if p.get('delete'):
self.can_delete.append(dt)
if (p.get('read') or p.get('write') or p.get('create')):
if p.get('report'):
self.can_get_report.append(dt)
for key in ("import", "export", "print", "email", "set_user_permissions"):
if p.get(key):
getattr(self, "can_" + key).append(dt)
if not dtp.get('istable'):
if not dtp.get('issingle') and not dtp.get('read_only'):
self.can_search.append(dt)
if not dtp.get('module') in self.allow_modules:
self.allow_modules.append(dtp.get('module'))
self.can_write += self.can_create
self.can_write += self.in_create
self.can_read += self.can_write
self.shared = frappe.db.sql_list("""select distinct share_doctype from `tabDocShare`
where `user`=%s and `read`=1""", self.name)
self.can_read = list(set(self.can_read + self.shared))
self.all_read += self.can_read
if "System Manager" in self.roles:
self.can_import = frappe.db.sql_list("""select name from `tabDocType`
where allow_import = 1""")
def get_defaults(self):
import frappe.defaults
self.defaults = frappe.defaults.get_defaults(self.name)
return self.defaults
# update recent documents
def update_recent(self, dt, dn):
rdl = frappe.cache().get_value("recent:" + self.name) or []
new_rd = [dt, dn]
# clear if exists
for i in range(len(rdl)):
rd = rdl[i]
if rd==new_rd:
del rdl[i]
break
if len(rdl) > 19:
rdl = rdl[:19]
rdl = [new_rd] + rdl
frappe.cache().set_value("recent:" + self.name, rdl)
def _get(self, key):
if not self.can_read:
self.build_permissions()
return getattr(self, key)
def get_can_read(self):
"""return list of doctypes that the user can read"""
if not self.can_read:
self.build_permissions()
return self.can_read
def load_user(self):
d = frappe.db.sql("""select email, first_name, last_name, time_zone,
email_signature, user_type, language, background_image, background_style
from tabUser where name = %s""", (self.name,), as_dict=1)[0]
if not self.can_read:
self.build_permissions()
d.name = self.name
d.recent = json.dumps(frappe.cache().get_value("recent:" + self.name) or [])
d.roles = self.get_roles()
d.defaults = self.get_defaults()
d.block_modules = self.get_block_modules()
for key in ("can_create", "can_write", "can_read", "can_cancel", "can_delete",
"can_get_report", "allow_modules", "all_read", "can_search",
"in_create", "can_export", "can_import", "can_print", "can_email",
"can_set_user_permissions"):
d[key] = list(set(getattr(self, key)))
return d
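# Hypothetical usage sketch: inside a request, the per-request instance is
# available as frappe.user (the doctype name below is illustrative).
#
#	if "ToDo" in frappe.user.get_can_read():
#		pass  # the current user may read ToDo documents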
def get_user_fullname(user):
fullname = frappe.db.sql("SELECT CONCAT_WS(' ', first_name, last_name) FROM `tabUser` WHERE name=%s", (user,))
return fullname and fullname[0][0] or ''
def get_fullname_and_avatar(user):
first_name, last_name, avatar = frappe.db.get_value("User",
user, ["first_name", "last_name", "user_image"])
return _dict({
"fullname": " ".join(filter(None, [first_name, last_name])),
"avatar": avatar
})
def get_system_managers(only_name=False):
"""returns all system manager's user details"""
import email.utils
from frappe.core.doctype.user.user import STANDARD_USERS
system_managers = frappe.db.sql("""select distinct name,
concat_ws(" ", if(first_name="", null, first_name), if(last_name="", null, last_name))
as fullname from tabUser p
where docstatus < 2 and enabled = 1
and name not in ({})
and exists (select * from tabUserRole ur
where ur.parent = p.name and ur.role="System Manager")""".format(", ".join(["%s"]*len(STANDARD_USERS))),
STANDARD_USERS, as_dict=True)
if only_name:
return [p.name for p in system_managers]
else:
return [email.utils.formataddr((p.fullname, p.name)) for p in system_managers]
def add_role(user, role):
frappe.get_doc("User", user).add_roles(role)
def add_system_manager(email, first_name=None, last_name=None):
# add user
user = frappe.new_doc("User")
user.update({
"name": email,
"email": email,
"enabled": 1,
"first_name": first_name or email,
"last_name": last_name,
"user_type": "System User"
})
user.insert()
# add roles
roles = frappe.db.sql_list("""select name from `tabRole`
where name not in ("Administrator", "Guest", "All")""")
user.add_roles(*roles)
def get_roles(user=None, with_standard=True):
"""get roles of current user"""
if not user:
user = frappe.session.user
if user=='Guest':
return ['Guest']
roles = frappe.cache().get_value("roles", user=user)
if not roles:
roles = [r[0] for r in frappe.db.sql("""select role from tabUserRole
where parent=%s and role!='All'""", (user,))] + ['All']
frappe.cache().set_value("roles", roles, user=user)
# filter standard if required
if not with_standard:
roles = filter(lambda x: x not in ['All', 'Guest', 'Administrator'], roles)
return roles
def get_enabled_system_users():
return frappe.db.sql("""select * from tabUser where
user_type='System User' and enabled=1 and name not in ('Administrator', 'Guest')""", as_dict=1)
def is_website_user(user):
return frappe.get_user(user).doc.user_type == "Website User"
|
mit
|
youprofit/shogun
|
examples/undocumented/python_modular/distance_hammingword_modular.py
|
26
|
1551
|
#!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindna = lm.load_dna('../data/fm_train_dna.dat')
testdna = lm.load_dna('../data/fm_test_dna.dat')
testdat = lm.load_labels('../data/fm_test_real.dat')
parameter_list = [[traindna,testdna,testdat,4,0,False,False],
[traindna,testdna,testdat,3,0,False,False]]
def distance_hammingword_modular (fm_train_dna=traindna,fm_test_dna=testdna,
fm_test_real=testdat,order=3,gap=0,reverse=False,use_sign=False):
from modshogun import StringCharFeatures, StringWordFeatures, DNA
from modshogun import SortWordString
from modshogun import HammingWordDistance
charfeat=StringCharFeatures(DNA)
charfeat.set_features(fm_train_dna)
feats_train=StringWordFeatures(charfeat.get_alphabet())
feats_train.obtain_from_char(charfeat, order-1, order, gap, reverse)
preproc=SortWordString()
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
charfeat=StringCharFeatures(DNA)
charfeat.set_features(fm_test_dna)
feats_test=StringWordFeatures(charfeat.get_alphabet())
feats_test.obtain_from_char(charfeat, order-1, order, gap, reverse)
feats_test.add_preprocessor(preproc)
feats_test.apply_preprocessor()
distance=HammingWordDistance(feats_train, feats_train, use_sign)
dm_train=distance.get_distance_matrix()
distance.init(feats_train, feats_test)
dm_test=distance.get_distance_matrix()
return distance,dm_train,dm_test
if __name__=='__main__':
print('HammingWordDistance')
distance_hammingword_modular(*parameter_list[0])
|
gpl-3.0
|
sigma-random/asuswrt-merlin
|
release/src/router/samba36/source3/build/charset.py
|
19
|
2022
|
# tests for charsets for Samba3
from Configure import conf
@conf
def CHECK_SAMBA3_CHARSET(conf, crossbuild=False):
'''Check for default charsets for Samba3
'''
if conf.CHECK_ICONV(define='HAVE_NATIVE_ICONV'):
default_dos_charset=False
default_display_charset=False
default_unix_charset=False
# check for default dos charset name
for charset in ['CP850', 'IBM850']:
if conf.CHECK_CHARSET_EXISTS(charset, headers='iconv.h'):
default_dos_charset=charset
break
# check for default display charset name
for charset in ['ASCII', '646']:
if conf.CHECK_CHARSET_EXISTS(charset, headers='iconv.h'):
default_display_charset=charset
break
# check for default unix charset name
for charset in ['UTF-8', 'UTF8']:
if conf.CHECK_CHARSET_EXISTS(charset, headers='iconv.h'):
default_unix_charset=charset
break
# At this point, we have a libiconv candidate. We know that
# we have the right headers and libraries, but we don't know
# whether it does the conversions we want. We can't test this
# because we are cross-compiling. This is not necessarily a big
# deal, since we can't guarantee that the results we get now will
# match the results we get at runtime anyway.
if crossbuild:
default_dos_charset="CP850"
default_display_charset="ASCII"
default_unix_charset="UTF-8"
# TODO: this used to warn about the set charset on cross builds
conf.DEFINE('DEFAULT_DOS_CHARSET', default_dos_charset, quote=True)
conf.DEFINE('DEFAULT_DISPLAY_CHARSET', default_display_charset, quote=True)
conf.DEFINE('DEFAULT_UNIX_CHARSET', default_unix_charset, quote=True)
else:
conf.DEFINE('DEFAULT_DOS_CHARSET', "ASCII", quote=True)
conf.DEFINE('DEFAULT_DISPLAY_CHARSET', "ASCII", quote=True)
conf.DEFINE('DEFAULT_UNIX_CHARSET', "UTF8", quote=True)
|
gpl-2.0
|
lesoros/earthcoin_p2pool
|
p2pool/util/expiring_dict.py
|
237
|
5233
|
from __future__ import division
import time
import weakref
from p2pool.util import deferral
class Node(object):
def __init__(self, contents, prev=None, next=None):
self.contents, self.prev, self.next = contents, prev, next
def insert_before(self, contents):
self.prev.next = self.prev = node = Node(contents, self.prev, self)
return node
def insert_after(self, contents):
self.next.prev = self.next = node = Node(contents, self, self.next)
return node
@staticmethod
def connect(prev, next):
if prev.next is not None or next.prev is not None:
raise ValueError('node already connected')
prev.next, next.prev = next, prev
def replace(self, contents):
self.contents = contents
def delete(self):
if self.prev.next is None or self.next.prev is None:
raise ValueError('node not connected')
self.prev.next, self.next.prev = self.next, self.prev
self.next = self.prev = None
class LinkedList(object):
def __init__(self, iterable=[]):
self.start, self.end = Node(None), Node(None)
Node.connect(self.start, self.end)
for item in iterable:
self.append(item)
def __repr__(self):
return 'LinkedList(%r)' % (list(self),)
def __len__(self):
return sum(1 for x in self)
def __iter__(self):
cur = self.start.next
while cur is not self.end:
cur2 = cur
cur = cur.next
yield cur2 # in case cur is deleted, but items inserted after are ignored
def __reversed__(self):
cur = self.end.prev
while cur is not self.start:
cur2 = cur
cur = cur.prev
yield cur2
def __getitem__(self, index):
if index < 0:
cur = self.end
for i in xrange(-index):
cur = cur.prev
if cur is self.start:
raise IndexError('index out of range')
else:
cur = self.start
for i in xrange(index + 1):
cur = cur.next
if cur is self.end:
raise IndexError('index out of range')
return cur
def appendleft(self, item):
return self.start.insert_after(item)
def append(self, item):
return self.end.insert_before(item)
def popleft(self):
node = self.start.next
if node is self.end:
raise IndexError('popleft from empty')
node.delete()
return node.contents
def pop(self):
node = self.end.prev
if node is self.start:
raise IndexError('pop from empty')
node.delete()
return node.contents
class ExpiringDict(object):
def __init__(self, expiry_time, get_touches=True):
self.expiry_time = expiry_time
self.get_touches = get_touches
self.expiry_deque = LinkedList()
self.d = dict() # key -> node, value
self_ref = weakref.ref(self, lambda _: expire_loop.stop() if expire_loop.running else None)
self._expire_loop = expire_loop = deferral.RobustLoopingCall(lambda: self_ref().expire())
expire_loop.start(1)
def stop(self):
self._expire_loop.stop()
def __repr__(self):
return 'ExpiringDict' + repr(self.__dict__)
def __len__(self):
return len(self.d)
_nothing = object()
def touch(self, key, value=_nothing):
'Updates expiry node, optionally replacing value, returning new value'
if value is self._nothing or key in self.d:
node, old_value = self.d[key]
node.delete()
new_value = old_value if value is self._nothing else value
self.d[key] = self.expiry_deque.append((time.time() + self.expiry_time, key)), new_value
return new_value
def expire(self):
t = time.time()
for node in self.expiry_deque:
timestamp, key = node.contents
if timestamp > t:
break
del self.d[key]
node.delete()
def __contains__(self, key):
return key in self.d
def __getitem__(self, key):
if self.get_touches:
value = self.touch(key)
else:
node, value = self.d[key]
return value
def __setitem__(self, key, value):
self.touch(key, value)
def __delitem__(self, key):
node, value = self.d.pop(key)
node.delete()
def get(self, key, default_value=None):
if key in self.d:
res = self[key]
else:
res = default_value
return res
def setdefault(self, key, default_value):
if key in self.d:
return self[key]
else:
self[key] = default_value
return default_value
def keys(self):
return self.d.keys()
def values(self):
return [value for node, value in self.d.itervalues()]
def itervalues(self):
for node, value in self.d.itervalues():
yield value
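# Hypothetical usage sketch (requires a running Twisted reactor, since the
# expiry loop is a deferral.RobustLoopingCall):
#
#     cache = ExpiringDict(expiry_time=10)
#     cache['key'] = 'value'    # sets and touches the entry
#     cache.get('key')          # a read also refreshes expiry when get_touches=True
#     # entries not touched within ~expiry_time seconds are removed by expire()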
|
gpl-3.0
|
MD-Studio/MDStudio
|
mdstudio/mdstudio/tests/api/test_converter.py
|
1
|
4204
|
import datetime
from unittest import TestCase
import pytz
from mdstudio.api.converter import convert_obj_to_json
class ConverterTest(TestCase):
def test_convert_obj_to_json_date_time(self):
document = {
'date': datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
'f': '2017-10-26T09:15:00+00:00'
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'date': '2017-10-26T09:16:00+00:00',
'f': '2017-10-26T09:15:00+00:00'
})
def test_convert_obj_to_json_date_time_nested(self):
document = {
'o': {
'date': datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
'f': '2017-10-26T09:15:00+00:00'
}
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'o': {
'date': '2017-10-26T09:16:00+00:00',
'f': '2017-10-26T09:15:00+00:00'
}
})
def test_convert_obj_to_json_date_time_nested_list(self):
document = {
'o': {
'date': [datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
datetime.datetime(2017, 10, 26, 9, 15, tzinfo=pytz.utc)],
'f': '2017-10-26T09:15:00+00:00'
}
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'o': {
'date': ['2017-10-26T09:16:00+00:00', '2017-10-26T09:15:00+00:00'],
'f': '2017-10-26T09:15:00+00:00'
}
})
def test_convert_obj_to_json_date(self):
document = {
'date': datetime.date(2017, 10, 26),
'f': '2017-10-26'
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'date': '2017-10-26',
'f': '2017-10-26'
})
def test_convert_obj_to_json_date_nested(self):
document = {
'o': {
'date': datetime.date(2017, 10, 26),
'f': '2017-10-26'
}
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'o': {
'date': '2017-10-26',
'f': '2017-10-26'
}
})
def test_convert_obj_to_json_date_nested_list(self):
document = {
'o': {
'date': [datetime.date(2017, 10, 26),
datetime.date(2017, 10, 26)],
'f': '2017-10-26'
}
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'o': {
'date': ['2017-10-26', '2017-10-26'],
'f': '2017-10-26'
}
})
def test_convert_obj_to_json_date_time_mixed(self):
document = {
'date': datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
'date2': datetime.date(2017, 10, 26)
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'date': '2017-10-26T09:16:00+00:00',
'date2': '2017-10-26'
})
def test_convert_obj_to_json_date_time_mixed2(self):
document = {
'date2': datetime.date(2017, 10, 26),
'date': datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc)
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'date2': '2017-10-26',
'date': '2017-10-26T09:16:00+00:00'
})
def test_convert_obj_to_json_bytes_keys(self):
document = {
b'requestHash': 'somerequesthash',
b'uri': 'mdstudio.logger.endpoint.push-logs',
'action': 'call',
'logType': 'user',
'username': 'lksjflk'
}
document = convert_obj_to_json(document)
self.assertEqual(document, {
'requestHash': 'somerequesthash',
'uri': 'mdstudio.logger.endpoint.push-logs',
'action': 'call',
'logType': 'user',
'username': 'lksjflk'
})
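# Illustrative sketch (not part of the original test module): the conversions the tests
# above exercise, combined on one document. The expected result follows the individual
# assertions in ConverterTest; the field names are only examples.
def _convert_obj_to_json_sketch():
    document = {
        b'id': 'abc',
        'when': datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
        'day': datetime.date(2017, 10, 26)
    }
    return convert_obj_to_json(document)
    # expected: {'id': 'abc', 'when': '2017-10-26T09:16:00+00:00', 'day': '2017-10-26'}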
|
apache-2.0
|
AkaZuko/gstudio
|
gnowsys-ndf/gnowsys_ndf/ndf/views/course.py
|
2
|
64077
|
''' -- imports from python libraries -- '''
# from datetime import datetime
import datetime
import json
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect # , HttpResponse uncomment when to use
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import render_to_response # , render uncomment when to use
from django.template import RequestContext
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from mongokit import IS
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.settings import GAPPS, MEDIA_ROOT, GSTUDIO_TASK_TYPES
from gnowsys_ndf.ndf.models import NodeJSONEncoder
from gnowsys_ndf.settings import GSTUDIO_SITE_NAME
from gnowsys_ndf.ndf.models import Node, AttributeType, RelationType
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.file import save_file
from gnowsys_ndf.ndf.templatetags.ndf_tags import edit_drawer_widget
from gnowsys_ndf.ndf.views.methods import get_node_common_fields, parse_template_data, get_execution_time, delete_node
from gnowsys_ndf.ndf.views.notify import set_notif_val
from gnowsys_ndf.ndf.views.methods import get_property_order_with_value
from gnowsys_ndf.ndf.views.methods import create_gattribute, create_grelation, create_task
from gnowsys_ndf.notification import models as notification
GST_COURSE = node_collection.one({'_type': "GSystemType", 'name': "Course"})
GST_ACOURSE = node_collection.one({'_type': "GSystemType", 'name': "Announced Course"})
app = GST_COURSE
@get_execution_time
def course(request, group_id, course_id=None):
"""
* Renders a list of all 'courses' available within the database.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app_id = None
app_id = app._id
course_coll = None
all_course_coll = None
ann_course_coll = None
enrolled_course_coll = []
course_enrollment_status = None
app_set_id = None
if course_id is None:
course_ins = node_collection.find_one({'_type': "GSystemType", "name": "Course"})
if course_ins:
course_id = str(course_ins._id)
app_set = node_collection.one({'_type': "GSystemType", 'name': "Announced Course"})
app_set_id = app_set._id
# Course search view
title = GST_COURSE.name
if request.user.id:
course_coll = node_collection.find({'member_of': GST_COURSE._id,'group_set': ObjectId(group_id),'status':u"DRAFT"})
all_course_coll = node_collection.find({'member_of': {'$in': [GST_COURSE._id,GST_ACOURSE._id]},
'group_set': ObjectId(group_id),'status':{'$in':[u"PUBLISHED",u"DRAFT"]}})
auth_node = node_collection.one({'_type': "Author", 'created_by': int(request.user.id)})
'''
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_dict = each["course_enrollment_status"]
course_enrollment_status = [ObjectId(each) for each in course_enrollment_dict]
enrolled_course_coll = node_collection.find({'_id': {'$in': course_enrollment_status}})
'''
ann_course_coll = node_collection.find({'member_of': GST_ACOURSE._id, 'group_set': ObjectId(group_id),'status':u"PUBLISHED"})
return render_to_response("ndf/course.html",
{'title': title,
'app_id': app_id, 'course_gst': GST_COURSE,
'app_set_id': app_set_id,
'searching': True, 'course_coll': course_coll,
'groupid': group_id, 'group_id': group_id,
'all_course_coll': all_course_coll,
'enrolled_course_coll': enrolled_course_coll,
'ann_course_coll': ann_course_coll
},
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def create_edit(request, group_id, node_id=None):
"""Creates/Modifies details about the given quiz-item.
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
at_course_type = node_collection.one({'_type': 'AttributeType', 'name': 'nussd_course_type'})
context_variables = {'title': GST_COURSE.name,
'group_id': group_id,
'groupid': group_id
}
if node_id:
course_node = node_collection.one({'_type': u'GSystem', '_id': ObjectId(node_id)})
else:
course_node = node_collection.collection.GSystem()
available_nodes = node_collection.find({'_type': u'GSystem', 'member_of': ObjectId(GST_COURSE._id),'group_set': ObjectId(group_id),'status':{"$in":[u"DRAFT",u"PUBLISHED"]}})
nodes_list = []
for each in available_nodes:
nodes_list.append(str((each.name).strip().lower()))
if request.method == "POST":
# get_node_common_fields(request, course_node, group_id, GST_COURSE)
course_node.save(is_changed=get_node_common_fields(request, course_node, group_id, GST_COURSE))
create_gattribute(course_node._id, at_course_type, u"General")
return HttpResponseRedirect(reverse('course', kwargs={'group_id': group_id}))
else:
if node_id:
context_variables['node'] = course_node
context_variables['groupid'] = group_id
context_variables['group_id'] = group_id
context_variables['app_id'] = app._id
context_variables['nodes_list'] = json.dumps(nodes_list)
return render_to_response("ndf/course_create_edit.html",
context_variables,
context_instance=RequestContext(request)
)
# @login_required
@get_execution_time
def course_detail(request, group_id, _id):
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
course_structure_exists = False
enrolled_status = False
check_enroll_status = False
title = GST_COURSE.name
course_node = node_collection.one({"_id": ObjectId(_id)})
if course_node.collection_set:
course_structure_exists = True
gs_name = course_node.member_of_names_list[0]
context_variables = {'groupid': group_id,
'group_id': group_id,
'app_id': app._id,
'title': title,
'node': course_node,
'node_type': gs_name
}
if gs_name == "Course":
context_variables["course_structure_exists"] = course_structure_exists
if course_node.relation_set:
for rel in course_node.relation_set:
if "announced_as" in rel:
cnode = node_collection.one({'_id': ObjectId(rel["announced_as"][0])},{'_id':1})
context_variables["acnode"] = str(cnode['_id'])
check_enroll_status = True
break
else:
if course_node.relation_set:
for rel in course_node.relation_set:
if "announced_for" in rel:
cnode = node_collection.one({'_id': ObjectId(rel["announced_for"][0])})
context_variables["cnode"] = cnode
check_enroll_status = True
break
if request.user.id:
if check_enroll_status:
usr_id = int(request.user.id)
auth_node = node_collection.one({'_type': "Author", 'created_by': usr_id})
course_enrollment_status = {}
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_status = each["course_enrollment_status"]
if "acnode" in context_variables:
str_course_id = str(context_variables["acnode"])
else:
str_course_id = str(course_node._id)
if course_enrollment_status:
if str_course_id in course_enrollment_status:
enrolled_status = True
context_variables['enrolled_status'] = enrolled_status
return render_to_response("ndf/course_detail.html",
context_variables,
context_instance=RequestContext(request)
)
@login_required
@get_execution_time
def course_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Creates/Modifies document of given sub-types of Course(s).
"""
auth = None
tiss_site = False
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
if auth:
group_id = str(auth._id)
else:
pass
    if GSTUDIO_SITE_NAME == "TISS":
tiss_site = True
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
# app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
hide_mis_meta_content = True
mis_admin = None
property_order_list = []
template = ""
template_prefix = "mis"
if request.user:
if auth is None:
auth = node_collection.one({
'_type': 'Author', 'name': unicode(request.user.username)
})
agency_type = auth.agency_type
agency_type_node = node_collection.one({
'_type': "GSystemType", 'name': agency_type
}, {
'collection_set': 1
})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(
node_collection.one({
"_id": eachset
}, {
'_id': 1, 'name': 1, 'type_of': 1
})
)
if app_set_id:
course_gst = node_collection.one({
'_type': "GSystemType", '_id': ObjectId(app_set_id)
}, {
'name': 1, 'type_of': 1
})
template = "ndf/" + course_gst.name.strip().lower().replace(' ', '_') \
+ "_create_edit.html"
title = course_gst.name
if app_set_instance_id:
course_gs = node_collection.one({
'_type': "GSystem", '_id': ObjectId(app_set_instance_id)
})
else:
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
property_order_list = get_property_order_with_value(course_gs)
if request.method == "POST":
# [A] Save course-node's base-field(s)
start_time = ""
if "start_time" in request.POST:
start_time = request.POST.get("start_time", "")
start_time = datetime.datetime.strptime(start_time, "%m/%Y")
end_time = ""
if "end_time" in request.POST:
end_time = request.POST.get("end_time", "")
end_time = datetime.datetime.strptime(end_time, "%m/%Y")
nussd_course_type = ""
if "nussd_course_type" in request.POST:
nussd_course_type = request.POST.get("nussd_course_type", "")
nussd_course_type = unicode(nussd_course_type)
unset_ac_options = []
if "unset-ac-options" in request.POST:
unset_ac_options = request.POST.getlist("unset-ac-options")
else:
# Just to execute loop at least once for Course Sub-Types
# other than 'Announced Course'
unset_ac_options = ["dummy"]
if course_gst.name == u"Announced Course":
announce_to_colg_list = request.POST.get(
"announce_to_colg_list", ""
)
announce_to_colg_list = [ObjectId(colg_id) for colg_id in announce_to_colg_list.split(",")]
colg_ids = []
# Parsing ObjectId -- from string format to ObjectId
for each in announce_to_colg_list:
if each and ObjectId.is_valid(each):
colg_ids.append(ObjectId(each))
# Fetching college(s)
colg_list_cur = node_collection.find({
'_id': {'$in': colg_ids}
}, {
'name': 1, 'attribute_set.enrollment_code': 1
})
if "_id" in course_gs:
# It means we are in editing mode of given Announced Course GSystem
unset_ac_options = [course_gs._id]
ac_nc_code_list = []
# Prepare a list
# 0th index (ac_node): Announced Course node,
# 1st index (nc_id): NUSSD Course node's ObjectId,
# 2nd index (nc_course_code): NUSSD Course's code
for cid in unset_ac_options:
ac_node = None
nc_id = None
nc_course_code = ""
                        # Here course_gst is Announced Course GSystemType's node
ac_node = node_collection.one({
'_id': ObjectId(cid), 'member_of': course_gst._id
})
# If ac_node found, means
# (1) we are dealing with creating Announced Course
# else,
# (2) we are in editing phase of Announced Course
course_node = None
if not ac_node:
# In this case, cid is of NUSSD Course GSystem
# So fetch that to extract course_code
# Set to nc_id
ac_node = None
course_node = node_collection.one({
'_id': ObjectId(cid)
})
else:
# In this case, fetch NUSSD Course from
# Announced Course GSystem's announced_for relationship
for rel in ac_node.relation_set:
if "announced_for" in rel:
course_node_ids = rel["announced_for"]
break
# Fetch NUSSD Course GSystem
if course_node_ids:
course_node = node_collection.find_one({
"_id": {"$in": course_node_ids}
})
                        # If course_code doesn't exist then
# set NUSSD Course GSystem's name as course_code
if course_node:
nc_id = course_node._id
for attr in course_node.attribute_set:
if "course_code" in attr:
nc_course_code = attr["course_code"]
break
if not nc_course_code:
nc_course_code = course_node.name.replace(" ", "-")
# Append to ac_nc_code_list
ac_nc_code_list.append([ac_node, nc_id, nc_course_code])
# For each selected college
# Create Announced Course GSystem
for college_node in colg_list_cur:
# Fetch Enrollment code from "enrollment_code" (Attribute)
college_enrollment_code = ""
if college_node:
for attr in college_node.attribute_set:
if attr and "enrollment_code" in attr:
college_enrollment_code = attr["enrollment_code"]
break
ann_course_id_list = []
# For each selected course to Announce
for ac_nc_code in ac_nc_code_list:
course_gs = ac_nc_code[0]
nc_id = ac_nc_code[1]
cnode_for_content = node_collection.one({'_id': ObjectId(nc_id)})
nc_course_code = ac_nc_code[2]
if not course_gs:
# Create new Announced Course GSystem
course_gs = node_collection.collection.GSystem()
course_gs.member_of.append(course_gst._id)
if tiss_site:
# Prepare name for Announced Course GSystem
c_name = unicode(
nc_course_code + " - " + college_enrollment_code + " - "
+ start_time.strftime("%b %Y") + " - "
+ end_time.strftime("%b %Y")
)
else:
# Prepare name for Announced Course GSystem
c_name = unicode(
nc_course_code + " - "+ start_time.strftime("%b %Y") + " - "
+ end_time.strftime("%b %Y")
)
request.POST["name"] = c_name
is_changed = get_node_common_fields(
request, course_gs, group_id, course_gst
)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.content_org = cnode_for_content.content_org
course_gs.content = cnode_for_content.html_content
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({
'_id': field_set['_id']
})
field_instance_type = type(field_instance)
if (field_instance_type in
[AttributeType, RelationType]):
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
                                                    # Special case: AttributeTypes that require a file instance as its value, in which case the file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# Below 0th index is used because that function returns tuple(ObjectId, bool-value)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
if field_instance["name"] in ["start_time", "end_time"]:
# Course Duration
field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(course_gs._id, node_collection.collection.AttributeType(field_instance), field_value)
else:
# i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(nc_id)
# Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
# Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(course_gs._id, node_collection.collection.RelationType(field_instance), field_value)
ann_course_id_list.append(course_gs._id)
#commented email notifications to all registered user after announcement
# if not tiss_site:
# site = Site.objects.get(pk=1)
# site = site.name.__str__()
# ann_course_url_link = "http://" + site + "/home/course/course_detail/" + \
# str(course_gs._id)
# user_obj = User.objects.all()
# # Sending email to all registered users on site NROER
# render_label = render_to_string(
# "notification/label.html",
# {"sender": "NROER eCourses",
# "activity": "Course Announcement",
# "conjunction": "-"
# })
# if user_obj:
# notification.create_notice_type(render_label," New eCourse '"\
# + str(course_gs.name) +"' has been announced."\
# +" Visit this link to enroll into this ecourse : " \
# + ann_course_url_link, "notification")
# notification.send(user_obj, render_label, {"from_user": "NROER eCourses"})
else:
is_changed = get_node_common_fields(request, course_gs, group_id, course_gst)
if is_changed:
# Remove this when publish button is setup on interface
course_gs.status = u"PUBLISHED"
course_gs.save(is_changed=is_changed)
# [B] Store AT and/or RT field(s) of given course-node
for tab_details in property_order_list:
for field_set in tab_details[1]:
# Fetch only Attribute field(s) / Relation field(s)
if '_id' in field_set:
field_instance = node_collection.one({'_id': field_set['_id']})
field_instance_type = type(field_instance)
if field_instance_type in [AttributeType, RelationType]:
field_data_type = field_set['data_type']
# Fetch field's value depending upon AT/RT
# and Parse fetched-value depending upon
# that field's data-type
if field_instance_type == AttributeType:
if "File" in field_instance["validators"]:
                                            # Special case: AttributeTypes that require a file instance as its value, in which case the file document's ObjectId is used
if field_instance["name"] in request.FILES:
field_value = request.FILES[field_instance["name"]]
else:
field_value = ""
# Below 0th index is used because that function returns tuple(ObjectId, bool-value)
if field_value != '' and field_value != u'':
file_name = course_gs.name + " -- " + field_instance["altnames"]
content_org = ""
tags = ""
field_value = save_file(field_value, file_name, request.user.id, group_id, content_org, tags, oid=True)[0]
else:
# Other AttributeTypes
field_value = request.POST.get(field_instance["name"], "")
# if field_instance["name"] in ["start_time","end_time"]:
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%m/%Y")
# elif field_instance["name"] in ["start_enroll", "end_enroll"]: #Student Enrollment DUration
# field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y")
if field_instance["name"] in ["mast_tr_qualifications", "voln_tr_qualifications"]:
                                                # Needs a special kind of parsing
field_value = []
tr_qualifications = request.POST.get(field_instance["name"], '')
if tr_qualifications:
qualifications_dict = {}
tr_qualifications = [qual.strip() for qual in tr_qualifications.split(",")]
for i, qual in enumerate(tr_qualifications):
if (i % 2) == 0:
if qual == "true":
qualifications_dict["mandatory"] = True
elif qual == "false":
qualifications_dict["mandatory"] = False
else:
qualifications_dict["text"] = unicode(qual)
field_value.append(qualifications_dict)
qualifications_dict = {}
elif field_instance["name"] in ["max_marks", "min_marks"]:
# Needed because both these fields' values are dependent upon evaluation_type field's value
evaluation_type = request.POST.get("evaluation_type", "")
if evaluation_type == u"Continuous":
field_value = None
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
else:
field_value = parse_template_data(field_data_type, field_value, date_format_string="%d/%m/%Y %H:%M")
course_gs_triple_instance = create_gattribute(
course_gs._id,
node_collection.collection.AttributeType(field_instance),
field_value
)
else:
#i.e if field_instance_type == RelationType
if field_instance["name"] == "announced_for":
field_value = ObjectId(cid)
#Pass ObjectId of selected Course
elif field_instance["name"] == "acourse_for_college":
field_value = college_node._id
#Pass ObjectId of selected College
course_gs_triple_instance = create_grelation(
course_gs._id,
node_collection.collection.RelationType(field_instance),
field_value
)
if tiss_site:
return HttpResponseRedirect(
reverse(
app_name.lower() + ":" + template_prefix + '_app_detail',
kwargs={
'group_id': group_id, "app_id": app_id,
"app_set_id": app_set_id
}
)
)
else:
return HttpResponseRedirect(
reverse(
"course",
kwargs={
'group_id': group_id
}
)
)
univ = node_collection.one({
'_type': "GSystemType", 'name': "University"
}, {
'_id': 1
})
university_cur = None
if not mis_admin:
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'_id': 1, 'name': 1, 'group_admin': 1}
)
if tiss_site:
hide_mis_meta_content = False
if univ and mis_admin:
university_cur = node_collection.find(
{'member_of': univ._id, 'group_set': mis_admin._id},
{'name': 1}
).sort('name', 1)
default_template = "ndf/course_create_edit.html"
context_variables = {
'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name,
'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'title': title,
'hide_mis_meta_content':hide_mis_meta_content,
'tiss_site': tiss_site,
'university_cur': university_cur,
'property_order_list': property_order_list
}
if app_set_instance_id:
course_gs.get_neighbourhood(course_gs.member_of)
context_variables['node'] = course_gs
if "Announced Course" in course_gs.member_of_names_list:
for attr in course_gs.attribute_set:
if attr:
for eachk, eachv in attr.items():
context_variables[eachk] = eachv
for rel in course_gs.relation_set:
if rel:
for eachk, eachv in rel.items():
if eachv:
get_node_name = node_collection.one({'_id': eachv[0]})
context_variables[eachk] = get_node_name.name
try:
return render_to_response(
[template, default_template],
context_variables, context_instance=RequestContext(request)
)
except TemplateDoesNotExist as tde:
error_message = "\n CourseCreateEditViewError: This html template (" \
+ str(tde) + ") does not exists !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseCreateEditViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
@login_required
@get_execution_time
def mis_course_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Detail view of NUSSD Course/ Announced Course
"""
# print "\n Found course_detail n gone inn this...\n\n"
auth = None
if ObjectId.is_valid(group_id) is False:
group_ins = node_collection.one({'_type': "Group", "name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
app_name = app.name
# app_name = "mis"
app_set = ""
app_collection_set = []
title = ""
course_gst = None
course_gs = None
node = None
property_order_list = []
property_order_list_ac = []
    is_link_needed = True # This is required to show Link button on interface that links Student's/VoluntaryTeacher's node with its corresponding Author node
template_prefix = "mis"
response_dict = {'success': False}
context_variables = {}
#Course structure collection _dict
course_collection_dict = {}
course_collection_list = []
course_structure_exists = False
if request.user:
if auth is None:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
if auth:
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
if app_set_id:
course_gst = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)}, {'name': 1, 'type_of': 1})
title = course_gst.name
template = "ndf/course_list.html"
query = {}
college = {}
course = {}
ac_data_set = []
records_list = []
if course_gst.name == "Announced Course":
query = {
"member_of": course_gst._id,
"group_set": ObjectId(group_id),
"status": "PUBLISHED",
"attribute_set.ann_course_closure": u"Open",
}
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'course': '$relation_set.announced_for',
'college': '$relation_set.acourse_for_college',
'nussd_course_type': '$attribute_set.nussd_course_type',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
records_list = res["result"]
if records_list:
for each in res["result"]:
if each["college"]:
colg_id = each["college"][0][0]
if colg_id not in college:
c = node_collection.one({"_id": colg_id}, {"name": 1, "relation_set.college_affiliated_to": 1})
each["college"] = c.name
each["college_id"] = c._id
college[colg_id] = {}
college[colg_id]["name"] = each["college"]
for rel in c.relation_set:
if rel and "college_affiliated_to" in rel:
univ_id = rel["college_affiliated_to"][0]
u = node_collection.one({"_id": univ_id}, {"name": 1})
each.update({"university": u.name})
college[colg_id]["university"] = each["university"]
college[colg_id]["university_id"] = u._id
each["university_id"] = u._id
else:
each["college"] = college[colg_id]["name"]
each["college_id"] = colg_id
each.update({"university": college[colg_id]["university"]})
each.update({"university_id": college[colg_id]["university_id"]})
if each["course"]:
course_id = each["course"][0][0]
if course_id not in course:
each["course"] = node_collection.one({"_id": course_id}).name
course[course_id] = each["course"]
else:
each["course"] = course[course_id]
ac_data_set.append(each)
column_headers = [
("name", "Announced Course Name"),
("course", "Course Name"),
("nussd_course_type", "Course Type"),
("college", "College"),
("university", "University")
]
else:
query = {
"member_of": course_gst._id,
"group_set": ObjectId(group_id),
}
res = node_collection.collection.aggregate([
{
'$match': query
}, {
'$project': {
'_id': 0,
'ac_id': "$_id",
'name': '$name',
'nussd_course_type': '$attribute_set.nussd_course_type',
'created_at': "$created_at"
}
},
{
'$sort': {'created_at': 1}
}
])
records_list = res["result"]
if records_list:
for each in res["result"]:
ac_data_set.append(each)
column_headers = [
("ac_id", "Edit"),
("name", "Course Name"),
("nussd_course_type", "Course Type"),
]
response_dict["column_headers"] = column_headers
response_dict["success"] = True
response_dict["students_data_set"] = ac_data_set
response_dict["groupid"] = group_id
response_dict["app_id"] = app_id
response_dict["app_set_id"] = app_set_id
if app_set_instance_id:
template = "ndf/course_details.html"
node = node_collection.one({'_type': "GSystem", '_id': ObjectId(app_set_instance_id)})
property_order_list = get_property_order_with_value(node)
node.get_neighbourhood(node.member_of)
if title == u"Announced Course":
property_order_list_ac = node.attribute_set
# Course structure as list of dicts
if node.collection_set:
course_structure_exists = True
context_variables = { 'groupid': group_id, 'group_id': group_id,
'app_id': app_id, 'app_name': app_name, 'app_collection_set': app_collection_set,
'app_set_id': app_set_id,
'course_gst_name': course_gst.name,
'title': title,
'course_structure_exists': course_structure_exists,
'node': node,
'property_order_list': property_order_list,
'property_order_list_ac': property_order_list_ac,
'is_link_needed': is_link_needed,
'response_dict':json.dumps(response_dict, cls=NodeJSONEncoder)
}
try:
# print "\n template-list: ", [template, default_template]
# template = "ndf/fgh.html"
# default_template = "ndf/dsfjhk.html"
# return render_to_response([template, default_template],
return render_to_response(template,
context_variables,
context_instance = RequestContext(request)
)
except TemplateDoesNotExist as tde:
error_message = "\n CourseDetailListViewError: This html template (" + str(tde) + ") does not exists !!!\n"
raise Http404(error_message)
except Exception as e:
error_message = "\n CourseDetailListViewError: " + str(e) + " !!!\n"
raise Exception(error_message)
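# Illustrative note (not part of the original module): shape of one record produced by
# the "Announced Course" aggregation and enrichment loop in mis_course_detail() above,
# based on its $project stage. The ids and names are placeholders.
#
# {
#     'ac_id': ObjectId('...'),           # Announced Course _id
#     'name': u'NC-101 - Jan 2017 - May 2017',
#     'course': u'NC-101',                # resolved NUSSD Course name
#     'college': u'Some College',         # resolved via acourse_for_college
#     'college_id': ObjectId('...'),
#     'university': u'Some University',   # resolved via college_affiliated_to
#     'university_id': ObjectId('...'),
#     'nussd_course_type': [u'General'],
#     'created_at': datetime.datetime(2017, 1, 1)
# }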
# Ajax views for setting up Course Structure
@login_required
@get_execution_time
def create_course_struct(request, group_id, node_id):
"""
This view is to create the structure of the Course.
A Course holds CourseSection, which further holds CourseSubSection
in their respective collection_set.
A tree depiction to this is as follows:
Course Name:
1. CourseSection1
1.1. CourseSubSection1
1.2. CourseSubSection2
2. CourseSection2
2.1. CourseSubSection3
"""
ins_objectid = ObjectId()
if ins_objectid.is_valid(group_id) is False:
group_ins = node_collection.find_one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth:
group_id = str(auth._id)
else:
pass
app_id = None
app_set_id = None
tiss_site = False
property_order_list_cs = []
property_order_list_css = []
course_structure_exists = False
title = "Course Authoring"
    if GSTUDIO_SITE_NAME == "TISS":
tiss_site = True
course_node = node_collection.one({"_id": ObjectId(node_id)})
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_gs = node_collection.collection.GSystem()
cs_gs.member_of.append(cs_gst._id)
property_order_list_cs = get_property_order_with_value(cs_gs)
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_gs = node_collection.collection.GSystem()
css_gs.member_of.append(css_gst._id)
property_order_list_css = get_property_order_with_value(css_gs)
course_collection_list = course_node.collection_set
if course_collection_list:
course_structure_exists = True
# for attr in course_node.attribute_set:
# if attr.has_key("evaluation_type"):
# eval_type = attr["evaluation_type"]
    # If evaluation_type flag is True, it is Final. If False, it is Continuous
# if(eval_type==u"Final"):
# eval_type_flag = True
# else:
# eval_type_flag = False
if request.method == "GET":
app_id = request.GET.get("app_id", "")
app_set_id = request.GET.get("app_set_id", "")
return render_to_response("ndf/create_course_structure.html",
{'cnode': course_node,
'groupid': group_id,
'group_id': group_id,
'title': title,
'tiss_site':tiss_site,
'app_id': app_id, 'app_set_id': app_set_id,
'property_order_list': property_order_list_cs,
'property_order_list_css': property_order_list_css
},
context_instance=RequestContext(request)
)
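# Illustrative note (not part of the original module): the tree described in the
# create_course_struct() docstring is stored purely through collection_set references,
# e.g. (names are only examples):
#
#   Course.collection_set            -> [CourseSection1, CourseSection2]
#   CourseSection1.collection_set    -> [CourseSubSection1, CourseSubSection2]
#   CourseSubSection1.collection_set -> [CourseUnit1, ...]
#   CourseUnit1.collection_set       -> [resource ids (Page, File, ...)]
#
# save_course_section(), save_course_sub_section() and save_resources() below each
# push the newly created node's _id into its parent's collection_set.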
@login_required
def save_course_section(request, group_id):
'''
Accepts:
* NUSSD Course/Course node _id
* CourseSection name
Actions:
* Creates CourseSection GSystem with name received.
* Appends this new CourseSection node id into
NUSSD Course/Course collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSection node
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
cs_node_name = request.POST.get("cs_name", '')
course_node_id = request.POST.get("course_node_id", '')
cs_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSection"})
cs_new = node_collection.collection.GSystem()
cs_new.member_of.append(cs_gst._id)
cs_new.name = cs_node_name
cs_new.modified_by = int(request.user.id)
cs_new.created_by = int(request.user.id)
cs_new.contributors.append(int(request.user.id))
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
cs_new.prior_node.append(ObjectId(course_node._id))
cs_new.save()
node_collection.collection.update({'_id': course_node._id}, {'$push': {'collection_set': cs_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["cs_new_id"] = str(cs_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def save_course_sub_section(request, group_id):
'''
Accepts:
* CourseSection node _id
* CourseSubSection name
Actions:
* Creates CourseSubSection GSystem with name received.
* Appends this new CourseSubSection node id into
CourseSection collection_set
Returns:
* success (i.e True/False)
* ObjectId of CourseSubSection node
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_name = request.POST.get("css_name", '')
cs_node_id = request.POST.get("cs_node_id", '')
css_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseSubSection"})
css_new = node_collection.collection.GSystem()
css_new.member_of.append(css_gst._id)
# set name
css_new.name = css_node_name
css_new.modified_by = int(request.user.id)
css_new.created_by = int(request.user.id)
css_new.contributors.append(int(request.user.id))
cs_node = node_collection.one({"_id": ObjectId(cs_node_id)})
css_new.prior_node.append(cs_node._id)
css_new.save()
node_collection.collection.update({'_id': cs_node._id}, {'$push': {'collection_set': css_new._id }}, upsert=False, multi=False)
response_dict["success"] = True
response_dict["css_new_id"] = str(css_new._id)
return HttpResponse(json.dumps(response_dict))
@login_required
def change_node_name(request, group_id):
'''
Accepts:
* CourseSection/ CourseSubSection node _id
* New name for CourseSection node
Actions:
* Updates received node's name
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
node_id = request.POST.get("node_id", '')
new_name = request.POST.get("new_name", '')
node = node_collection.one({"_id": ObjectId(node_id)})
node.name = new_name.strip()
node.save()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
@login_required
def change_order(request, group_id):
'''
Accepts:
* 2 node ids.
Basically, either of CourseSection or CourseSubSection
* Parent node id
Either a NUSSD Course/Course or CourseSection
Actions:
* Swaps the 2 node ids in the collection set of received
parent node
'''
response_dict = {"success": False}
collection_set_list = []
if request.is_ajax() and request.method == "POST":
node_id_up = request.POST.get("node_id_up", '')
node_id_down = request.POST.get("node_id_down", '')
parent_node_id = request.POST.get("parent_node", '')
parent_node = node_collection.one({"_id": ObjectId(parent_node_id)})
collection_set_list = parent_node.collection_set
a, b = collection_set_list.index(ObjectId(node_id_up)), collection_set_list.index(ObjectId(node_id_down))
collection_set_list[b], collection_set_list[a] = collection_set_list[a], collection_set_list[b]
node_collection.collection.update({'_id': parent_node._id}, {'$set': {'collection_set': collection_set_list }}, upsert=False, multi=False)
parent_node.reload()
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
@login_required
def course_sub_section_prop(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* Properties dict
Actions:
* Creates GAttributes with the values of received dict
for the respective CourseSubSection node
Returns:
* success (i.e True/False)
        * If request.method is GET, all GAttributes of the CourseSubSection in a dict structure
'''
response_dict = {"success": False}
if request.is_ajax():
if request.method == "POST":
assessment_flag = False
css_node_id = request.POST.get("css_node_id", '')
prop_dict = request.POST.get("prop_dict", '')
assessment_chk = json.loads(request.POST.get("assessment_chk", ''))
prop_dict = json.loads(prop_dict)
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
at_cs_hours = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_minutes'})
at_cs_assessment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assessment'})
at_cs_assignment = node_collection.one({'_type': 'AttributeType', 'name': 'course_structure_assignment'})
at_cs_min_marks = node_collection.one({'_type': 'AttributeType', 'name': 'min_marks'})
at_cs_max_marks = node_collection.one({'_type': 'AttributeType', 'name': 'max_marks'})
if assessment_chk is True:
create_gattribute(css_node._id, at_cs_assessment, True)
assessment_flag = True
for propk, propv in prop_dict.items():
# add attributes to css gs
if(propk == "course_structure_minutes"):
create_gattribute(css_node._id, at_cs_hours, int(propv))
elif(propk == "course_structure_assignment"):
create_gattribute(css_node._id, at_cs_assignment, propv)
if assessment_flag:
if(propk == "min_marks"):
create_gattribute(css_node._id, at_cs_min_marks, int(propv))
if(propk == "max_marks"):
create_gattribute(css_node._id, at_cs_max_marks, int(propv))
css_node.reload()
response_dict["success"] = True
else:
css_node_id = request.GET.get("css_node_id", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
if css_node.attribute_set:
for each in css_node.attribute_set:
for k, v in each.items():
response_dict[k] = v
response_dict["success"] = True
else:
response_dict["success"] = False
return HttpResponse(json.dumps(response_dict))
@login_required
def add_units(request, group_id):
'''
Accepts:
* CourseSubSection node _id
* NUSSD Course/Course node _id
Actions:
* Redirects to course_units.html
'''
variable = None
unit_node = None
css_node_id = request.GET.get('css_node_id', '')
unit_node_id = request.GET.get('unit_node_id', '')
course_node_id = request.GET.get('course_node', '')
app_id = request.GET.get('app_id', '')
app_set_id = request.GET.get('app_set_id', '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
course_node = node_collection.one({"_id": ObjectId(course_node_id)})
title = "Course Units"
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
variable = RequestContext(request, {
'group_id': group_id, 'groupid': group_id,
'css_node': css_node,
'title': title,
'app_set_id': app_set_id,
'app_id': app_id,
'unit_node': unit_node,
'course_node': course_node,
})
template = "ndf/course_units.html"
return render_to_response(template, variable)
@login_required
def get_resources(request, group_id):
'''
Accepts:
* Name of GSystemType (Page, File, etc.)
* CourseSubSection node _id
* widget_for
Actions:
* Fetches all GSystems of selected GSystemType as resources
Returns:
* Returns Drawer with resources
'''
response_dict = {'success': False, 'message': ""}
try:
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get('css_node_id', "")
unit_node_id = request.POST.get('unit_node_id', "")
widget_for = request.POST.get('widget_for', "")
resource_type = request.POST.get('resource_type', "")
resource_type = resource_type.strip()
list_resources = []
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
unit_node = node_collection.one({"_id": ObjectId(unit_node_id)})
except:
unit_node = None
if resource_type:
if resource_type == "Pandora":
resource_type = "Pandora_video"
resource_gst = node_collection.one({'_type': "GSystemType", 'name': resource_type})
res = node_collection.find(
{
'member_of': resource_gst._id,
'group_set': ObjectId(group_id),
'status': u"PUBLISHED"
}
)
for each in res:
list_resources.append(each)
drawer_template_context = edit_drawer_widget("CourseUnits", group_id, unit_node, None, checked="collection_set", left_drawer_content=list_resources)
drawer_template_context["widget_for"] = widget_for
drawer_widget = render_to_string(
'ndf/drawer_widget.html',
drawer_template_context,
context_instance=RequestContext(request)
)
return HttpResponse(drawer_widget)
else:
error_message = "Resource Drawer: Either not an ajax call or not a POST request!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
except Exception as e:
error_message = "Resource Drawer: " + str(e) + "!!!"
response_dict["message"] = error_message
return HttpResponse(json.dumps(response_dict))
@login_required
def save_resources(request, group_id):
'''
Accepts:
* List of resources (i.e GSystem of Page, File, etc.)
* CourseSubSection node _id
Actions:
* Sets the received resources in respective node's collection_set
'''
response_dict = {"success": False,"create_new_unit": True}
if request.is_ajax() and request.method == "POST":
list_of_res = json.loads(request.POST.get('list_of_res', ""))
css_node_id = request.POST.get('css_node', "")
unit_name = request.POST.get('unit_name', "")
unit_name = unit_name.strip()
unit_node_id = request.POST.get('unit_node_id', "")
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
list_of_res_ids = [ObjectId(each_res) for each_res in list_of_res]
try:
cu_new = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_new = None
if not cu_new:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_new = node_collection.collection.GSystem()
cu_new.member_of.append(cu_gst._id)
# set name
cu_new.name = unit_name.strip()
cu_new.modified_by = int(request.user.id)
cu_new.created_by = int(request.user.id)
cu_new.contributors.append(int(request.user.id))
cu_new.prior_node.append(css_node._id)
cu_new.save()
response_dict["create_new_unit"] = True
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'name': unit_name }}, upsert=False, multi=False)
if cu_new._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_new._id }}, upsert=False, multi=False)
node_collection.collection.update({'_id': cu_new._id}, {'$set': {'collection_set':list_of_res_ids}},upsert=False,multi=False)
cu_new.reload()
response_dict["success"] = True
response_dict["cu_new_id"] = str(cu_new._id)
return HttpResponse(json.dumps(response_dict))
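# Illustrative note (not part of the original module): the ajax POST parameters that
# save_resources() reads above. The ids are placeholders; list_of_res arrives
# JSON-encoded and an empty unit_node_id means a new CourseUnit is created.
#
#   list_of_res  = '["59f1f2c3a4b5c6d7e8f90001", "59f1f2c3a4b5c6d7e8f90002"]'
#   css_node     = '59f1f2c3a4b5c6d7e8f90003'   # CourseSubSection _id
#   unit_name    = 'Unit 1'
#   unit_node_id = ''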
@login_required
def create_edit_unit(request, group_id):
'''
Accepts:
* ObjectId of unit node if exists
* ObjectId of CourseSubSection node
Actions:
* Creates/Updates Unit node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
css_node_id = request.POST.get("css_node_id", '')
unit_node_id = request.POST.get("unit_node_id", '')
unit_name = request.POST.get("unit_name", '')
css_node = node_collection.one({"_id": ObjectId(css_node_id)})
try:
cu_node = node_collection.one({'_id': ObjectId(unit_node_id)})
except:
cu_node = None
if cu_node is None:
cu_gst = node_collection.one({'_type': "GSystemType", 'name': "CourseUnit"})
cu_node = node_collection.collection.GSystem()
cu_node.member_of.append(cu_gst._id)
# set name
cu_node.name = unit_name.strip()
cu_node.modified_by = int(request.user.id)
cu_node.created_by = int(request.user.id)
cu_node.contributors.append(int(request.user.id))
cu_node.prior_node.append(css_node._id)
cu_node.save()
response_dict["unit_node_id"] = str(cu_node._id)
node_collection.collection.update({'_id': cu_node._id}, {'$set': {'name': unit_name}}, upsert=False, multi=False)
if cu_node._id not in css_node.collection_set:
node_collection.collection.update({'_id': css_node._id}, {'$push': {'collection_set': cu_node._id}}, upsert=False, multi=False)
return HttpResponse(json.dumps(response_dict))
@login_required
def delete_course(request, group_id, node_id):
del_stat = delete_item(node_id)
if del_stat:
return HttpResponseRedirect(reverse('course', kwargs={'group_id': ObjectId(group_id)}))
@login_required
def delete_from_course_structure(request, group_id):
'''
Accepts:
* ObjectId of node that is to be deleted.
It can be CourseSection/CourseSubSection/CourseUnit
Actions:
* Deletes the received node
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
del_stat = False
if request.is_ajax() and request.method == "POST":
oid = request.POST.get("oid", '')
del_stat = delete_item(oid)
if del_stat:
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
def delete_item(item):
node_item = node_collection.one({'_id': ObjectId(item)})
if u"CourseUnit" not in node_item.member_of_names_list and node_item.collection_set:
for each in node_item.collection_set:
d_st = delete_item(each)
del_status, del_status_msg = delete_node(
node_id=node_item._id,
deletion_type=0
)
return del_status
@login_required
def enroll_generic(request, group_id):
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
course_enrollment_status_at = node_collection.one({
"_type": "AttributeType", "name": "course_enrollment_status"
})
node_id = request.POST.get('node_id', '')
usr_id = request.POST.get('usr_id', '')
usr_id = int(usr_id)
auth_node = node_collection.one({'_type': "Author", 'created_by': usr_id})
course_node = node_collection.one({'_id': ObjectId(node_id)})
course_enrollment_status = {}
if auth_node.attribute_set:
for each in auth_node.attribute_set:
if each and "course_enrollment_status" in each:
course_enrollment_status = each["course_enrollment_status"]
str_course_id = str(course_node._id)
if course_enrollment_status is not None:
if str_course_id not in course_enrollment_status:
course_enrollment_status.update({str_course_id: u"Approved"})
at_node = create_gattribute(auth_node["_id"], course_enrollment_status_at, course_enrollment_status)
response_dict['success'] = True
return HttpResponse(json.dumps(response_dict))
else:
return HttpResponse(json.dumps(response_dict))
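# Illustrative note (not part of the original module): the "course_enrollment_status"
# attribute read and written above (and in course_detail()) is a plain dict stored on
# the Author node, keyed by the stringified course ObjectId. enroll_generic() only ever
# writes the u"Approved" value; the ids below are placeholders.
#
#   course_enrollment_status = {
#       "59f1f2c3a4b5c6d7e8f90123": u"Approved",
#       "59f1f2c3a4b5c6d7e8f90456": u"Approved"
#   }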
@login_required
def remove_resource_from_unit(request, group_id):
'''
Accepts:
* ObjectId of node to be removed from collection_set.
* ObjectId of unit_node.
Actions:
        * Removes res_id from unit_node's collection_set
Returns:
* success (i.e True/False)
'''
response_dict = {"success": False}
if request.is_ajax() and request.method == "POST":
unit_node_id = request.POST.get("unit_node_id", '')
res_id = request.POST.get("res_id", '')
unit_node = node_collection.one({'_id': ObjectId(unit_node_id)})
if unit_node.collection_set and res_id:
node_collection.collection.update({'_id': unit_node._id}, {'$pull': {'collection_set': ObjectId(res_id)}}, upsert=False, multi=False)
response_dict["success"] = True
return HttpResponse(json.dumps(response_dict))
|
agpl-3.0
|
dsfsdgsbngfggb/odoo
|
addons/lunch/tests/__init__.py
|
260
|
1077
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_lunch
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
raju249/oppia
|
core/jobs_registry.py
|
2
|
2820
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job registries."""
from core.domain import exp_jobs_one_off
from core.domain import feedback_jobs_continuous
from core.domain import stats_jobs_continuous
from core.domain import stats_jobs_one_off
from core.domain import user_jobs_continuous
from core.domain import user_jobs_one_off
from core.domain import email_jobs_one_off
# List of all manager classes for one-off batch jobs for which to show controls
# on the admin dashboard.
ONE_OFF_JOB_MANAGERS = [
user_jobs_one_off.DashboardSubscriptionsOneOffJob,
exp_jobs_one_off.IndexAllExplorationsJobManager,
exp_jobs_one_off.ExpSummariesCreationOneOffJob,
exp_jobs_one_off.ExplorationValidityJobManager,
stats_jobs_one_off.StatisticsAudit,
user_jobs_one_off.UserContributionsOneOffJob,
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob,
exp_jobs_one_off.ExpSummariesContributorsOneOffJob,
user_jobs_one_off.UserFirstContributionMsecOneOffJob,
exp_jobs_one_off.ExplorationMigrationJobManager,
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob,
email_jobs_one_off.EmailHashRegenerationOneOffJob,
user_jobs_one_off.UserProfilePictureOneOffJob,
exp_jobs_one_off.ItemSelectionInteractionOneOffJob]
# List of all ContinuousComputation managers to show controls for on the
# admin dashboard.
# NOTE TO DEVELOPERS: When a new ContinuousComputation manager is defined,
# it should be registered here.
ALL_CONTINUOUS_COMPUTATION_MANAGERS = [
stats_jobs_continuous.StatisticsAggregator,
user_jobs_continuous.DashboardRecentUpdatesAggregator,
user_jobs_continuous.UserStatsAggregator,
feedback_jobs_continuous.FeedbackAnalyticsAggregator]
class ContinuousComputationEventDispatcher(object):
"""Dispatches events to the relevant ContinuousComputation classes."""
@classmethod
def dispatch_event(cls, event_type, *args, **kwargs):
"""Dispatches an incoming event to the ContinuousComputation
classes which listen to events of that type.
"""
for klass in ALL_CONTINUOUS_COMPUTATION_MANAGERS:
if event_type in klass.get_event_types_listened_to():
klass.on_incoming_event(event_type, *args, **kwargs)
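# Illustrative sketch (not part of the original module): how the dispatcher above is
# driven. The event type string and payload are hypothetical; real callers pass one of
# the event types returned by a manager's get_event_types_listened_to().
def _dispatch_event_sketch():
    ContinuousComputationEventDispatcher.dispatch_event(
        'sample_event_type', 'exploration_id_0', {'answer': 'payload'})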
|
apache-2.0
|
rhinstaller/pykickstart
|
tests/commands/module.py
|
2
|
3241
|
#
# Martin Kolman <[email protected]>
#
# Copyright 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
class F29_TestCase(CommandTest):
def runTest(self):
# basic parsing
self.assert_parse('module --name=nodejs --stream=6', 'module --name=nodejs --stream=6\n')
self.assert_parse('module --name=foo --stream=1337', 'module --name=foo --stream=1337\n')
# non integer stream ids should also be fine
self.assert_parse('module --name=bar --stream=baz', 'module --name=bar --stream=baz\n')
# --stream is optional
self.assert_parse_error('module')
self.assert_parse('module --name=foo', 'module --name=foo\n')
# but name needs to be always present
self.assert_parse_error('module --stream=bar')
# the values must not be empty
self.assert_parse_error('module foo --name=bar --stream=')
self.assert_parse_error('module foo --name= --stream=baz')
self.assert_parse_error('module foo --name= --stream=')
self.assert_parse_error('module foo --name --stream')
self.assert_parse_error('module foo --name')
        # the module command does not take any positional arguments
self.assert_parse_error('module foo')
self.assert_parse_error('module foo --name=bar')
self.assert_parse_error('module foo --name=bar --stream=baz')
# unknown options are an error
self.assert_parse_error('module --name=bar --stream=baz --uknown=stuff')
self.assert_parse_error('module --name=bar --uknown=stuff')
class RHEL8_TestCase(F29_TestCase):
def runTest(self):
# run F29 test case.
F29_TestCase.runTest(self)
# parse --disable
self.assert_parse('module --name=nodejs --stream=6 --disable', 'module --name=nodejs --stream=6 --disable\n')
self.assert_parse('module --name=foo --stream=1337 --disable', 'module --name=foo --stream=1337 --disable\n')
# no assignment to the --disable option
self.assert_parse_error('module --name=nodejs --stream=6 --disable=bar')
self.assert_parse_error('module --name=foo --stream=1337 --disable=bar')
class F31_TestCase(RHEL8_TestCase):
def runTest(self):
# run RHEL8 test case.
RHEL8_TestCase.runTest(self)
if __name__ == "__main__":
unittest.main()
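# Illustrative note (not part of the original test module): the kickstart lines parsed
# by the tests above look like this in a kickstart file (F29+ syntax; --disable is only
# accepted from the RHEL8/F31 versions of the command onward):
#
#   module --name=nodejs --stream=6
#   module --name=foo
#   module --name=nodejs --stream=6 --disable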
|
gpl-2.0
|
newswangerd/ansible
|
lib/ansible/vars/manager.py
|
13
|
35816
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from collections import defaultdict
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound, AnsibleAssertionError, AnsibleTemplateError
from ansible.inventory.host import Host
from ansible.inventory.helpers import sort_groups, get_group_vars
from ansible.module_utils._text import to_text
from ansible.module_utils.common._collections_compat import Mapping, MutableMapping, Sequence
from ansible.module_utils.six import iteritems, text_type, string_types
from ansible.plugins.loader import lookup_loader
from ansible.vars.fact_cache import FactCache
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.vars import combine_vars, load_extra_vars, load_options_vars
from ansible.utils.unsafe_proxy import wrap_var
from ansible.vars.clean import namespace_facts, clean_facts
from ansible.vars.plugins import get_vars_from_inventory_sources, get_vars_from_path
display = Display()
def preprocess_vars(a):
    '''
    Ensures that vars contained in the parameter passed in are
    returned as a list of dictionaries, so that, for instance,
    vars loaded from a file conform to an expected state.
    '''
if a is None:
return None
elif not isinstance(a, list):
data = [a]
else:
data = a
for item in data:
if not isinstance(item, MutableMapping):
raise AnsibleError("variable files must contain either a dictionary of variables, or a list of dictionaries. Got: %s (%s)" % (a, type(a)))
return data
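# Illustrative sketch only (not part of the original module), showing the
# normalization preprocess_vars() performs; the literal values are made up.
def _example_preprocess_vars():
    assert preprocess_vars(None) is None
    assert preprocess_vars({'foo': 'bar'}) == [{'foo': 'bar'}]
    assert preprocess_vars([{'a': 1}, {'b': 2}]) == [{'a': 1}, {'b': 2}]
    # any list element that is not a mapping raises AnsibleError instead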
class VariableManager:
_ALLOWED = frozenset(['plugins_by_group', 'groups_plugins_play', 'groups_plugins_inventory', 'groups_inventory',
'all_plugins_play', 'all_plugins_inventory', 'all_inventory'])
def __init__(self, loader=None, inventory=None, version_info=None):
self._nonpersistent_fact_cache = defaultdict(dict)
self._vars_cache = defaultdict(dict)
self._extra_vars = defaultdict(dict)
self._host_vars_files = defaultdict(dict)
self._group_vars_files = defaultdict(dict)
self._inventory = inventory
self._loader = loader
self._hostvars = None
self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
self._options_vars = load_options_vars(version_info)
# If the basedir is specified as the empty string then it results in cwd being used.
# This is not a safe location to load vars from.
basedir = self._options_vars.get('basedir', False)
self.safe_basedir = bool(basedir is False or basedir)
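        # In other words: safe_basedir is True when no basedir was supplied at
        # all (the value is False) or when a non-empty basedir was supplied;
        # only the empty-string case (implicit cwd) is treated as unsafe.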
# load extra vars
self._extra_vars = load_extra_vars(loader=self._loader)
# load fact cache
try:
self._fact_cache = FactCache()
except AnsibleError as e:
# bad cache plugin is not fatal error
# fallback to a dict as in memory cache
display.warning(to_text(e))
self._fact_cache = {}
def __getstate__(self):
data = dict(
fact_cache=self._fact_cache,
np_fact_cache=self._nonpersistent_fact_cache,
vars_cache=self._vars_cache,
extra_vars=self._extra_vars,
host_vars_files=self._host_vars_files,
group_vars_files=self._group_vars_files,
omit_token=self._omit_token,
options_vars=self._options_vars,
inventory=self._inventory,
safe_basedir=self.safe_basedir,
)
return data
def __setstate__(self, data):
self._fact_cache = data.get('fact_cache', defaultdict(dict))
self._nonpersistent_fact_cache = data.get('np_fact_cache', defaultdict(dict))
self._vars_cache = data.get('vars_cache', defaultdict(dict))
self._extra_vars = data.get('extra_vars', dict())
self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
self._inventory = data.get('inventory', None)
self._options_vars = data.get('options_vars', dict())
self.safe_basedir = data.get('safe_basedir', False)
self._loader = None
self._hostvars = None
@property
def extra_vars(self):
return self._extra_vars
def set_inventory(self, inventory):
self._inventory = inventory
def get_vars(self, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True,
_hosts=None, _hosts_all=None, stage='task'):
'''
Returns the variables, with optional "context" given via the parameters
for the play, host, and task (which could possibly result in different
sets of variables being returned due to the additional context).
The order of precedence is:
- play->roles->get_default_vars (if there is a play context)
- group_vars_files[host] (if there is a host context)
- host_vars_files[host] (if there is a host context)
- host->get_vars (if there is a host context)
- fact_cache[host] (if there is a host context)
- play vars (if there is a play context)
- play vars_files (if there's no host context, ignore
file names that cannot be templated)
- task->get_vars (if there is a task context)
- vars_cache[host] (if there is a host context)
- extra vars
``_hosts`` and ``_hosts_all`` should be considered private args, with only internal trusted callers relying
on the functionality they provide. These arguments may be removed at a later date without a deprecation
period and without warning.
'''
display.debug("in VariableManager get_vars()")
all_vars = dict()
magic_variables = self._get_magic_variables(
play=play,
host=host,
task=task,
include_hostvars=include_hostvars,
include_delegate_to=include_delegate_to,
_hosts=_hosts,
_hosts_all=_hosts_all,
)
_vars_sources = {}
def _combine_and_track(data, new_data, source):
'''
Wrapper function to update var sources dict and call combine_vars()
See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
'''
if C.DEFAULT_DEBUG:
# Populate var sources dict
for key in new_data:
_vars_sources[key] = source
return combine_vars(data, new_data)
# default for all cases
basedirs = []
if self.safe_basedir: # avoid adhoc/console loading cwd
basedirs = [self._loader.get_basedir()]
if play:
# first we compile any vars specified in defaults/main.yml
# for all roles within the specified play
for role in play.get_roles():
all_vars = _combine_and_track(all_vars, role.get_default_vars(), "role '%s' defaults" % role.name)
if task:
# set basedirs
if C.PLAYBOOK_VARS_ROOT == 'all': # should be default
basedirs = task.get_search_path()
elif C.PLAYBOOK_VARS_ROOT in ('bottom', 'playbook_dir'): # only option in 2.4.0
basedirs = [task.get_search_path()[0]]
elif C.PLAYBOOK_VARS_ROOT != 'top':
# preserves default basedirs, only option pre 2.3
raise AnsibleError('Unknown playbook vars logic: %s' % C.PLAYBOOK_VARS_ROOT)
            # if we have a task in this context, and that task has a role, make
            # sure it sees its defaults above any other roles, as we previously
            # (v1) made sure each task had a copy of its role's default vars
if task._role is not None and (play or task.action in C._ACTION_INCLUDE_ROLE):
all_vars = _combine_and_track(all_vars, task._role.get_default_vars(dep_chain=task.get_dep_chain()),
"role '%s' defaults" % task._role.name)
if host:
# THE 'all' group and the rest of groups for a host, used below
all_group = self._inventory.groups.get('all')
host_groups = sort_groups([g for g in host.get_groups() if g.name not in ['all']])
def _get_plugin_vars(plugin, path, entities):
data = {}
try:
data = plugin.get_vars(self._loader, path, entities)
except AttributeError:
try:
for entity in entities:
if isinstance(entity, Host):
data.update(plugin.get_host_vars(entity.name))
else:
data.update(plugin.get_group_vars(entity.name))
except AttributeError:
if hasattr(plugin, 'run'):
raise AnsibleError("Cannot use v1 type vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
else:
raise AnsibleError("Invalid vars plugin %s from %s" % (plugin._load_name, plugin._original_path))
return data
# internal functions that actually do the work
def _plugins_inventory(entities):
''' merges all entities by inventory source '''
return get_vars_from_inventory_sources(self._loader, self._inventory._sources, entities, stage)
def _plugins_play(entities):
''' merges all entities adjacent to play '''
data = {}
for path in basedirs:
data = _combine_and_track(data, get_vars_from_path(self._loader, path, entities, stage), "path '%s'" % path)
return data
# configurable functions that are sortable via config, remember to add to _ALLOWED if expanding this list
def all_inventory():
return all_group.get_vars()
def all_plugins_inventory():
return _plugins_inventory([all_group])
def all_plugins_play():
return _plugins_play([all_group])
def groups_inventory():
''' gets group vars from inventory '''
return get_group_vars(host_groups)
def groups_plugins_inventory():
''' gets plugin sources from inventory for groups '''
return _plugins_inventory(host_groups)
def groups_plugins_play():
''' gets plugin sources from play for groups '''
return _plugins_play(host_groups)
            def plugins_by_groups():
                '''
                merges all plugin sources by group.
                This should be used on its own, NOT in combination with the other groups_plugins* functions
                '''
                data = {}
                for group in host_groups:
                    data[group] = _combine_and_track(data.get(group, {}), _plugins_inventory(group), "inventory group_vars for '%s'" % group)
                    data[group] = _combine_and_track(data[group], _plugins_play(group), "playbook group_vars for '%s'" % group)
                return data
# Merge groups as per precedence config
# only allow to call the functions we want exposed
for entry in C.VARIABLE_PRECEDENCE:
if entry in self._ALLOWED:
display.debug('Calling %s to load vars for %s' % (entry, host.name))
all_vars = _combine_and_track(all_vars, locals()[entry](), "group vars, precedence entry '%s'" % entry)
else:
display.warning('Ignoring unknown variable precedence entry: %s' % (entry))
# host vars, from inventory, inventory adjacent and play adjacent via plugins
all_vars = _combine_and_track(all_vars, host.get_vars(), "host vars for '%s'" % host)
all_vars = _combine_and_track(all_vars, _plugins_inventory([host]), "inventory host_vars for '%s'" % host)
all_vars = _combine_and_track(all_vars, _plugins_play([host]), "playbook host_vars for '%s'" % host)
# finally, the facts caches for this host, if it exists
# TODO: cleaning of facts should eventually become part of taskresults instead of vars
try:
facts = wrap_var(self._fact_cache.get(host.name, {}))
all_vars.update(namespace_facts(facts))
# push facts to main namespace
if C.INJECT_FACTS_AS_VARS:
all_vars = _combine_and_track(all_vars, wrap_var(clean_facts(facts)), "facts")
else:
# always 'promote' ansible_local
all_vars = _combine_and_track(all_vars, wrap_var({'ansible_local': facts.get('ansible_local', {})}), "facts")
except KeyError:
pass
if play:
all_vars = _combine_and_track(all_vars, play.get_vars(), "play vars")
vars_files = play.get_vars_files()
try:
for vars_file_item in vars_files:
# create a set of temporary vars here, which incorporate the extra
# and magic vars so we can properly template the vars_files entries
temp_vars = combine_vars(all_vars, self._extra_vars)
temp_vars = combine_vars(temp_vars, magic_variables)
templar = Templar(loader=self._loader, variables=temp_vars)
# we assume each item in the list is itself a list, as we
# support "conditional includes" for vars_files, which mimics
# the with_first_found mechanism.
vars_file_list = vars_file_item
if not isinstance(vars_file_list, list):
vars_file_list = [vars_file_list]
# now we iterate through the (potential) files, and break out
# as soon as we read one from the list. If none are found, we
# raise an error, which is silently ignored at this point.
try:
for vars_file in vars_file_list:
vars_file = templar.template(vars_file)
if not (isinstance(vars_file, Sequence)):
raise AnsibleError(
"Invalid vars_files entry found: %r\n"
"vars_files entries should be either a string type or "
"a list of string types after template expansion" % vars_file
)
try:
data = preprocess_vars(self._loader.load_from_file(vars_file, unsafe=True))
if data is not None:
for item in data:
all_vars = _combine_and_track(all_vars, item, "play vars_files from '%s'" % vars_file)
break
except AnsibleFileNotFound:
# we continue on loader failures
continue
except AnsibleParserError:
raise
else:
# if include_delegate_to is set to False, we ignore the missing
# vars file here because we're working on a delegated host
if include_delegate_to:
raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
except (UndefinedError, AnsibleUndefinedVariable):
if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'"
% vars_file_item, obj=vars_file_item)
else:
# we do not have a full context here, and the missing variable could be because of that
# so just show a warning and continue
display.vvv("skipping vars_file '%s' due to an undefined variable" % vars_file_item)
continue
display.vvv("Read vars_file '%s'" % vars_file_item)
except TypeError:
raise AnsibleParserError("Error while reading vars files - please supply a list of file names. "
"Got '%s' of type %s" % (vars_files, type(vars_files)))
# By default, we now merge in all vars from all roles in the play,
# unless the user has disabled this via a config option
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
all_vars = _combine_and_track(all_vars, role.get_vars(include_params=False), "role '%s' vars" % role.name)
# next, we merge in the vars from the role, which will specifically
# follow the role dependency chain, and then we merge in the tasks
# vars (which will look at parent blocks/task includes)
if task:
if task._role:
all_vars = _combine_and_track(all_vars, task._role.get_vars(task.get_dep_chain(), include_params=False),
"role '%s' vars" % task._role.name)
all_vars = _combine_and_track(all_vars, task.get_vars(), "task vars")
# next, we merge in the vars cache (include vars) and nonpersistent
# facts cache (set_fact/register), in that order
if host:
# include_vars non-persistent cache
all_vars = _combine_and_track(all_vars, self._vars_cache.get(host.get_name(), dict()), "include_vars")
# fact non-persistent cache
all_vars = _combine_and_track(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()), "set_fact")
# next, we merge in role params and task include params
if task:
if task._role:
all_vars = _combine_and_track(all_vars, task._role.get_role_params(task.get_dep_chain()), "role '%s' params" % task._role.name)
# special case for include tasks, where the include params
# may be specified in the vars field for the task, which should
# have higher precedence than the vars/np facts above
all_vars = _combine_and_track(all_vars, task.get_include_params(), "include params")
# extra vars
all_vars = _combine_and_track(all_vars, self._extra_vars, "extra vars")
# magic variables
all_vars = _combine_and_track(all_vars, magic_variables, "magic vars")
# special case for the 'environment' magic variable, as someone
# may have set it as a variable and we don't want to stomp on it
if task:
all_vars['environment'] = task.environment
# 'vars' magic var
if task or play:
# has to be copy, otherwise recursive ref
all_vars['vars'] = all_vars.copy()
# if we have a task and we're delegating to another host, figure out the
# variables for that host now so we don't have to rely on hostvars later
if task and task.delegate_to is not None and include_delegate_to:
all_vars['ansible_delegated_vars'], all_vars['_ansible_loop_cache'] = self._get_delegated_vars(play, task, all_vars)
display.debug("done with get_vars()")
if C.DEFAULT_DEBUG:
# Use VarsWithSources wrapper class to display var sources
return VarsWithSources.new_vars_with_sources(all_vars, _vars_sources)
else:
return all_vars
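    # A hedged illustration of the precedence above (hypothetical variable
    # name): if 'http_port' is set both in a role's defaults/main.yml and via
    # --extra-vars, the extra-vars value wins, because extra vars are merged
    # into all_vars near the end, after role defaults, inventory/group/host
    # vars, play vars and task vars.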
def _get_magic_variables(self, play, host, task, include_hostvars, include_delegate_to,
_hosts=None, _hosts_all=None):
'''
Returns a dictionary of so-called "magic" variables in Ansible,
which are special variables we set internally for use.
'''
variables = {}
variables['playbook_dir'] = os.path.abspath(self._loader.get_basedir())
variables['ansible_playbook_python'] = sys.executable
variables['ansible_config_file'] = C.CONFIG_FILE
if play:
# This is a list of all role names of all dependencies for all roles for this play
dependency_role_names = list(set([d.get_name() for r in play.roles for d in r.get_all_dependencies()]))
# This is a list of all role names of all roles for this play
play_role_names = [r.get_name() for r in play.roles]
# ansible_role_names includes all role names, dependent or directly referenced by the play
variables['ansible_role_names'] = list(set(dependency_role_names + play_role_names))
# ansible_play_role_names includes the names of all roles directly referenced by this play
# roles that are implicitly referenced via dependencies are not listed.
variables['ansible_play_role_names'] = play_role_names
# ansible_dependent_role_names includes the names of all roles that are referenced via dependencies
# dependencies that are also explicitly named as roles are included in this list
variables['ansible_dependent_role_names'] = dependency_role_names
# DEPRECATED: role_names should be deprecated in favor of ansible_role_names or ansible_play_role_names
variables['role_names'] = variables['ansible_play_role_names']
variables['ansible_play_name'] = play.get_name()
if task:
if task._role:
variables['role_name'] = task._role.get_name(include_role_fqcn=False)
variables['role_path'] = task._role._role_path
variables['role_uuid'] = text_type(task._role._uuid)
variables['ansible_collection_name'] = task._role._role_collection
variables['ansible_role_name'] = task._role.get_name()
if self._inventory is not None:
variables['groups'] = self._inventory.get_groups_dict()
if play:
templar = Templar(loader=self._loader)
if templar.is_template(play.hosts):
pattern = 'all'
else:
pattern = play.hosts or 'all'
# add the list of hosts in the play, as adjusted for limit/filters
if not _hosts_all:
_hosts_all = [h.name for h in self._inventory.get_hosts(pattern=pattern, ignore_restrictions=True)]
if not _hosts:
_hosts = [h.name for h in self._inventory.get_hosts()]
variables['ansible_play_hosts_all'] = _hosts_all[:]
variables['ansible_play_hosts'] = [x for x in variables['ansible_play_hosts_all'] if x not in play._removed_hosts]
variables['ansible_play_batch'] = [x for x in _hosts if x not in play._removed_hosts]
# DEPRECATED: play_hosts should be deprecated in favor of ansible_play_batch,
# however this would take work in the templating engine, so for now we'll add both
variables['play_hosts'] = variables['ansible_play_batch']
# the 'omit' value allows params to be left out if the variable they are based on is undefined
variables['omit'] = self._omit_token
# Set options vars
for option, option_value in iteritems(self._options_vars):
variables[option] = option_value
if self._hostvars is not None and include_hostvars:
variables['hostvars'] = self._hostvars
return variables
def _get_delegated_vars(self, play, task, existing_variables):
        # This method has a lot of code copied from ``TaskExecutor._get_loop_items``;
        # if this is failing, and ``TaskExecutor._get_loop_items`` is not,
        # then more will have to be copied here.
# TODO: dedupe code here and with ``TaskExecutor._get_loop_items``
# this may be possible once we move pre-processing pre fork
if not hasattr(task, 'loop'):
# This "task" is not a Task, so we need to skip it
return {}, None
# we unfortunately need to template the delegate_to field here,
# as we're fetching vars before post_validate has been called on
# the task that has been passed in
vars_copy = existing_variables.copy()
# get search path for this task to pass to lookup plugins
vars_copy['ansible_search_path'] = task.get_search_path()
# ensure basedir is always in (dwim already searches here but we need to display it)
if self._loader.get_basedir() not in vars_copy['ansible_search_path']:
vars_copy['ansible_search_path'].append(self._loader.get_basedir())
templar = Templar(loader=self._loader, variables=vars_copy)
items = []
has_loop = True
if task.loop_with is not None:
if task.loop_with in lookup_loader:
fail = True
if task.loop_with == 'first_found':
# first_found loops are special. If the item is undefined then we want to fall through to the next
fail = False
try:
loop_terms = listify_lookup_plugin_terms(terms=task.loop, templar=templar,
loader=self._loader, fail_on_undefined=fail, convert_bare=False)
if not fail:
loop_terms = [t for t in loop_terms if not templar.is_template(t)]
mylookup = lookup_loader.get(task.loop_with, loader=self._loader, templar=templar)
# give lookup task 'context' for subdir (mostly needed for first_found)
for subdir in ['template', 'var', 'file']: # TODO: move this to constants?
if subdir in task.action:
break
setattr(mylookup, '_subdir', subdir + 's')
items = wrap_var(mylookup.run(terms=loop_terms, variables=vars_copy))
except AnsibleTemplateError:
# This task will be skipped later due to this, so we just setup
# a dummy array for the later code so it doesn't fail
items = [None]
else:
raise AnsibleError("Failed to find the lookup named '%s' in the available lookup plugins" % task.loop_with)
elif task.loop is not None:
try:
items = templar.template(task.loop)
except AnsibleTemplateError:
# This task will be skipped later due to this, so we just setup
# a dummy array for the later code so it doesn't fail
items = [None]
else:
has_loop = False
items = [None]
        # since the delegated host can change per loop iteration, we keep a dict keyed by resolved host name
delegated_host_vars = dict()
item_var = getattr(task.loop_control, 'loop_var', 'item')
cache_items = False
for item in items:
# update the variables with the item value for templating, in case we need it
if item is not None:
vars_copy[item_var] = item
templar.available_variables = vars_copy
delegated_host_name = templar.template(task.delegate_to, fail_on_undefined=False)
if delegated_host_name != task.delegate_to:
cache_items = True
if delegated_host_name is None:
raise AnsibleError(message="Undefined delegate_to host for task:", obj=task._ds)
if not isinstance(delegated_host_name, string_types):
raise AnsibleError(message="the field 'delegate_to' has an invalid type (%s), and could not be"
" converted to a string type." % type(delegated_host_name), obj=task._ds)
if delegated_host_name in delegated_host_vars:
# no need to repeat ourselves, as the delegate_to value
# does not appear to be tied to the loop item variable
continue
# now try to find the delegated-to host in inventory, or failing that,
# create a new host on the fly so we can fetch variables for it
delegated_host = None
if self._inventory is not None:
delegated_host = self._inventory.get_host(delegated_host_name)
# try looking it up based on the address field, and finally
# fall back to creating a host on the fly to use for the var lookup
if delegated_host is None:
for h in self._inventory.get_hosts(ignore_limits=True, ignore_restrictions=True):
# check if the address matches, or if both the delegated_to host
# and the current host are in the list of localhost aliases
if h.address == delegated_host_name:
delegated_host = h
break
else:
delegated_host = Host(name=delegated_host_name)
else:
delegated_host = Host(name=delegated_host_name)
# now we go fetch the vars for the delegated-to host and save them in our
# master dictionary of variables to be used later in the TaskExecutor/PlayContext
delegated_host_vars[delegated_host_name] = self.get_vars(
play=play,
host=delegated_host,
task=task,
include_delegate_to=False,
include_hostvars=True,
)
delegated_host_vars[delegated_host_name]['inventory_hostname'] = vars_copy.get('inventory_hostname')
_ansible_loop_cache = None
if has_loop and cache_items:
# delegate_to templating produced a change, so we will cache the templated items
# in a special private hostvar
# this ensures that delegate_to+loop doesn't produce different results than TaskExecutor
# which may reprocess the loop
_ansible_loop_cache = items
return delegated_host_vars, _ansible_loop_cache
def clear_facts(self, hostname):
'''
Clears the facts for a host
'''
self._fact_cache.pop(hostname, None)
def set_host_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
if not isinstance(facts, Mapping):
raise AnsibleAssertionError("the type of 'facts' to set for host_facts should be a Mapping but is a %s" % type(facts))
try:
host_cache = self._fact_cache[host]
except KeyError:
# We get to set this as new
host_cache = facts
else:
if not isinstance(host_cache, MutableMapping):
raise TypeError('The object retrieved for {0} must be a MutableMapping but was'
' a {1}'.format(host, type(host_cache)))
# Update the existing facts
host_cache.update(facts)
# Save the facts back to the backing store
self._fact_cache[host] = host_cache
def set_nonpersistent_facts(self, host, facts):
'''
Sets or updates the given facts for a host in the fact cache.
'''
if not isinstance(facts, Mapping):
raise AnsibleAssertionError("the type of 'facts' to set for nonpersistent_facts should be a Mapping but is a %s" % type(facts))
try:
self._nonpersistent_fact_cache[host].update(facts)
except KeyError:
self._nonpersistent_fact_cache[host] = facts
def set_host_variable(self, host, varname, value):
'''
Sets a value in the vars_cache for a host.
'''
if host not in self._vars_cache:
self._vars_cache[host] = dict()
if varname in self._vars_cache[host] and isinstance(self._vars_cache[host][varname], MutableMapping) and isinstance(value, MutableMapping):
self._vars_cache[host] = combine_vars(self._vars_cache[host], {varname: value})
else:
self._vars_cache[host][varname] = value
class VarsWithSources(MutableMapping):
'''
Dict-like class for vars that also provides source information for each var
This class can only store the source for top-level vars. It does no tracking
on its own, just shows a debug message with the information that it is provided
when a particular var is accessed.
'''
def __init__(self, *args, **kwargs):
''' Dict-compatible constructor '''
self.data = dict(*args, **kwargs)
self.sources = {}
@classmethod
def new_vars_with_sources(cls, data, sources):
''' Alternate constructor method to instantiate class with sources '''
v = cls(data)
v.sources = sources
return v
def get_source(self, key):
return self.sources.get(key, None)
def __getitem__(self, key):
val = self.data[key]
# See notes in the VarsWithSources docstring for caveats and limitations of the source tracking
display.debug("variable '%s' from source: %s" % (key, self.sources.get(key, "unknown")))
return val
def __setitem__(self, key, value):
self.data[key] = value
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
# Prevent duplicate debug messages by defining our own __contains__ pointing at the underlying dict
def __contains__(self, key):
return self.data.__contains__(key)
def copy(self):
return VarsWithSources.new_vars_with_sources(self.data.copy(), self.sources.copy())
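# Illustrative sketch only (not part of the original module): how the
# VarsWithSources wrapper ties a value back to the source label it was merged
# from; the variable name and source label below are made up.
def _example_vars_with_sources():
    wrapped = VarsWithSources.new_vars_with_sources(
        {'http_port': 8080}, {'http_port': "play vars"})
    assert wrapped['http_port'] == 8080            # also emits a debug message
    assert wrapped.get_source('http_port') == "play vars"
    assert wrapped.get_source('not_tracked') is None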
|
gpl-3.0
|
onitake/ansible
|
contrib/inventory/lxc_inventory.py
|
79
|
2560
|
#!/usr/bin/env python
#
# (c) 2015-16 Florian Haas, hastexo Professional Services GmbH
# <[email protected]>
# Based in part on:
# libvirt_lxc.py, (c) 2013, Michael Scherer <[email protected]>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Ansible inventory script for LXC containers. Requires Python
bindings for LXC API.
In LXC, containers can be grouped by setting the lxc.group option,
which may be found more than once in a container's
configuration. So, we enumerate all containers, fetch their list
of groups, and then build the dictionary in the way Ansible expects
it.
"""
from __future__ import print_function
import sys
import lxc
import json
def build_dict():
"""Returns a dictionary keyed to the defined LXC groups. All
containers, including the ones not in any group, are included in the
"all" group."""
# Enumerate all containers, and list the groups they are in. Also,
# implicitly add every container to the 'all' group.
containers = dict([(c,
['all'] +
(lxc.Container(c).get_config_item('lxc.group') or []))
for c in lxc.list_containers()])
# Extract the groups, flatten the list, and remove duplicates
groups = set(sum([g for g in containers.values()], []))
    # Create a dictionary for each group (including the 'all' group)
return dict([(g, {'hosts': [k for k, v in containers.items() if g in v],
'vars': {'ansible_connection': 'lxc'}}) for g in groups])
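# Illustrative only (a hedged sketch with made-up container names): the shape
# of the dictionary build_dict() is expected to return, which is also the
# inventory JSON format Ansible consumes via --list.
_EXAMPLE_INVENTORY = {
    'all': {'hosts': ['web01', 'db01'], 'vars': {'ansible_connection': 'lxc'}},
    'webservers': {'hosts': ['web01'], 'vars': {'ansible_connection': 'lxc'}},
}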
def main(argv):
"""Returns a JSON dictionary as expected by Ansible"""
result = build_dict()
if len(argv) == 2 and argv[1] == '--list':
json.dump(result, sys.stdout)
elif len(argv) == 3 and argv[1] == '--host':
json.dump({'ansible_connection': 'lxc'}, sys.stdout)
else:
print("Need an argument, either --list or --host <host>", file=sys.stderr)
if __name__ == '__main__':
main(sys.argv)
|
gpl-3.0
|
zzicewind/nova
|
nova/db/sqlalchemy/migrate_repo/versions/216_havana.py
|
44
|
64425
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from oslo_log import log as logging
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
from nova.i18n import _LE
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
# Schema. In Folsom we created tables as latin1 and converted them to utf8
# later. This conversion causes some of the Text columns on MySQL to get
# created as mediumtext instead of just text.
def MediumText():
return Text().with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
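# Illustrative only (not part of the original migration): with_variant() keeps
# the generic type everywhere except the named dialect, so the DDL compiled
# for MySQL is expected to be MEDIUMTEXT while other backends keep plain TEXT.
def _example_mediumtext_ddl():
    from sqlalchemy.dialects import mysql, sqlite
    assert MediumText().compile(dialect=mysql.dialect()) == 'MEDIUMTEXT'
    assert MediumText().compile(dialect=sqlite.dialect()) == 'TEXT'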
def Inet():
return String(length=43).with_variant(dialects.postgresql.INET(),
'postgresql')
def InetSmall():
return String(length=39).with_variant(dialects.postgresql.INET(),
'postgresql')
def _create_shadow_tables(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = list(meta.tables.keys())
meta.bind = migrate_engine
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
column_copy = None
# NOTE(boris-42): BigInteger is not supported by sqlite, so
# after copy it will have NullType, other
# types that are used in Nova are supported by
# sqlite.
            if isinstance(column.type, NullType):
                column_copy = Column(column.name, BigInteger(), default=0)
            elif table_name == 'instances' and column.name == 'locked_by':
                enum = Enum('owner', 'admin',
                            name='shadow_instances0locked_by')
                column_copy = Column(column.name, enum)
            else:
                column_copy = column.copy()
columns.append(column_copy)
shadow_table_name = 'shadow_' + table_name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_LE('Exception while creating table.'))
raise
def _populate_instance_types(instance_types_table):
default_inst_types = {
'm1.tiny': dict(mem=512, vcpus=1, root_gb=1, eph_gb=0, flavid=1),
'm1.small': dict(mem=2048, vcpus=1, root_gb=20, eph_gb=0, flavid=2),
'm1.medium': dict(mem=4096, vcpus=2, root_gb=40, eph_gb=0, flavid=3),
'm1.large': dict(mem=8192, vcpus=4, root_gb=80, eph_gb=0, flavid=4),
'm1.xlarge': dict(mem=16384, vcpus=8, root_gb=160, eph_gb=0, flavid=5)
}
try:
i = instance_types_table.insert()
for name, values in default_inst_types.items():
i.execute({'name': name, 'memory_mb': values["mem"],
'vcpus': values["vcpus"], 'deleted': 0,
'root_gb': values["root_gb"],
'ephemeral_gb': values["eph_gb"],
'rxtx_factor': 1,
'swap': 0,
'flavorid': values["flavid"],
'disabled': False,
'is_public': True})
except Exception:
LOG.info(repr(instance_types_table))
LOG.exception(_LE('Exception while seeding instance_types table'))
raise
# NOTE(dprince): we add these here so our schema contains dump tables
# which were added in migration 209 (in Havana). We can drop these in
# Icehouse: https://bugs.launchpad.net/nova/+bug/1266538
def _create_dump_tables(migrate_engine):
meta = MetaData(migrate_engine)
meta.reflect(migrate_engine)
table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
'instance_actions_events', 'instance_faults', 'migrations']
for table_name in table_names:
table = Table(table_name, meta, autoload=True)
dump_table_name = 'dump_' + table.name
columns = []
for column in table.columns:
# NOTE(dprince): The dump_ tables were originally created from an
# earlier schema version so we don't want to add the pci_stats
# column so that schema diffs are exactly the same.
if column.name == 'pci_stats':
continue
else:
columns.append(column.copy())
table_dump = Table(dump_table_name, meta, *columns,
mysql_engine='InnoDB')
table_dump.create()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
agent_builds = Table('agent_builds', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('hypervisor', String(length=255)),
Column('os', String(length=255)),
Column('architecture', String(length=255)),
Column('version', String(length=255)),
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('device_name', String(length=255), nullable=True),
Column('delete_on_termination', Boolean),
Column('snapshot_id', String(length=36), nullable=True),
Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
Column('connection_info', MediumText()),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
Column('source_type', String(length=255), nullable=True),
Column('destination_type', String(length=255), nullable=True),
Column('guest_format', String(length=255), nullable=True),
Column('device_type', String(length=255), nullable=True),
Column('disk_bus', String(length=255), nullable=True),
Column('boot_index', Integer),
Column('image_id', String(length=36), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
Column('uuid', String(length=36)),
Column('last_ctr_in', BigInteger()),
Column('last_ctr_out', BigInteger()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cells = Table('cells', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('api_url', String(length=255)),
Column('weight_offset', Float),
Column('weight_scale', Float),
Column('name', String(length=255)),
Column('is_parent', Boolean),
Column('deleted', Integer),
Column('transport_url', String(length=255), nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
certificates = Table('certificates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_node_stats = Table('compute_node_stats', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('compute_node_id', Integer, nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('service_id', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('local_gb', Integer, nullable=False),
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
Column('current_workload', Integer),
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
Column('deleted', Integer),
Column('host_ip', InetSmall()),
Column('supported_instances', Text),
Column('pci_stats', Text, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('username', String(length=255)),
Column('password', String(length=255)),
Column('console_type', String(length=255)),
Column('public_hostname', String(length=255)),
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
# NOTE(mriedem): DB2 can't create the FK since we don't have the unique
# constraint on instances.uuid because it's nullable (so a unique
# constraint isn't created for instances.uuid, only a unique index).
consoles_instance_uuid_column_args = ['instance_uuid', String(length=36)]
if migrate_engine.name != 'ibm_db_sa':
consoles_instance_uuid_column_args.append(
ForeignKey('instances.uuid', name='consoles_instance_uuid_fkey'))
consoles = Table('consoles', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
Column(*consoles_instance_uuid_column_args),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('network_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('fixed_ip_id', Integer),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('auto_assigned', Boolean),
Column('pool', String(length=255)),
Column('interface', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36)),
Column('code', Integer, nullable=False),
Column('message', String(length=255)),
Column('details', MediumText()),
Column('host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('network_info', MediumText()),
Column('instance_uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
groups = Table('instance_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('uuid', String(length=36), nullable=False),
Column('name', String(length=255)),
UniqueConstraint('uuid', 'deleted',
name='uniq_instance_groups0uuid0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_metadata = Table('instance_group_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_policy = Table('instance_group_policy', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('policy', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_member = Table('instance_group_member', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_id', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_metadata = Table('instance_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_system_metadata = Table('instance_system_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36), nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, ForeignKey('instance_types.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('name', String(length=255)),
Column('id', Integer, primary_key=True, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('swap', Integer, nullable=False),
Column('vcpu_weight', Integer),
Column('flavorid', String(length=255)),
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('disabled', Boolean),
Column('is_public', Boolean),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
inst_lock_enum = Enum('owner', 'admin', name='instances0locked_by')
instances = Table('instances', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('internal_id', Integer),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('image_ref', String(length=255)),
Column('kernel_id', String(length=255)),
Column('ramdisk_id', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
Column('architecture', String(length=255)),
Column('root_device_name', String(length=255)),
Column('access_ip_v4', InetSmall()),
Column('access_ip_v6', InetSmall()),
Column('config_drive', String(length=255)),
Column('task_state', String(length=255)),
Column('default_ephemeral_device', String(length=255)),
Column('default_swap_device', String(length=255)),
Column('progress', Integer),
Column('auto_disk_config', Boolean),
Column('shutdown_terminate', Boolean),
Column('disable_terminate', Boolean),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
Column('node', String(length=255)),
Column('deleted', Integer),
Column('locked_by', inst_lock_enum),
Column('cleaned', Integer, default=0),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('action', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('request_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('message', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_actions_events = Table('instance_actions_events', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('event', String(length=255)),
Column('action_id', Integer, ForeignKey('instance_actions.id')),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('result', String(length=255)),
Column('traceback', Text),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
Column('volume_id', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
Column('public_key', MediumText()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
migrations = Table('migrations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('source_compute', String(length=255)),
Column('dest_compute', String(length=255)),
Column('dest_host', String(length=255)),
Column('status', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
Column('source_node', String(length=255)),
Column('dest_node', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
networks = Table('networks', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('injected', Boolean),
Column('cidr', Inet()),
Column('netmask', InetSmall()),
Column('bridge', String(length=255)),
Column('gateway', InetSmall()),
Column('broadcast', InetSmall()),
Column('dns1', InetSmall()),
Column('vlan', Integer),
Column('vpn_public_address', InetSmall()),
Column('vpn_public_port', Integer),
Column('vpn_private_address', InetSmall()),
Column('dhcp_start', InetSmall()),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('cidr_v6', Inet()),
Column('gateway_v6', InetSmall()),
Column('label', String(length=255)),
Column('netmask_v6', InetSmall()),
Column('bridge_interface', String(length=255)),
Column('multi_host', Boolean),
Column('dns2', InetSmall()),
Column('uuid', String(length=36)),
Column('priority', Integer),
Column('rxtx_base', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
pci_devices = Table('pci_devices', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Integer, default=0, nullable=False),
Column('id', Integer, primary_key=True),
Column('compute_node_id', Integer, nullable=False),
Column('address', String(12), nullable=False),
Column('product_id', String(4)),
Column('vendor_id', String(4)),
Column('dev_type', String(8)),
Column('dev_id', String(255)),
Column('label', String(255), nullable=False),
Column('status', String(36), nullable=False),
Column('extra_info', Text, nullable=True),
Column('instance_uuid', String(36), nullable=True),
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
UniqueConstraint('compute_node_id',
'address', 'deleted',
name=pci_devices_uc_name),
mysql_engine='InnoDB',
mysql_charset='utf8')
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('class_name', String(length=255)),
Column('resource', String(length=255)),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_usages = Table('quota_usages', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('in_use', Integer, nullable=False),
Column('reserved', Integer, nullable=False),
Column('until_refresh', Integer),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = Table('quotas', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('project_id', String(length=255)),
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
project_user_quotas = Table('project_user_quotas', meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('user_id',
String(length=255),
nullable=False),
Column('project_id',
String(length=255),
nullable=False),
Column('resource',
String(length=255),
nullable=False),
Column('hard_limit', Integer, nullable=True),
UniqueConstraint('user_id', 'project_id', 'resource',
'deleted', name=uniq_name),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
reservations = Table('reservations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('usage_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('delta', Integer, nullable=False),
Column('expire', DateTime),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_instance_association = \
Table('security_group_instance_association', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_group_id', Integer, ForeignKey('security_groups.id')),
Column('protocol', String(length=255)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('group_id', Integer, ForeignKey('security_groups.id')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_default_rules = Table('security_group_default_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
services = Table('services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('binary', String(length=255)),
Column('topic', String(length=255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('deleted', Integer),
Column('disabled_reason', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('volume_id', String(length=36), nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('status', String(length=255)),
Column('progress', String(length=255)),
Column('volume_size', Integer),
Column('scheduled_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('task_name', String(length=255), nullable=False),
Column('state', String(length=255), nullable=False),
Column('host', String(length=255), nullable=False),
Column('period_beginning', DateTime, nullable=False),
Column('period_ending', DateTime, nullable=False),
Column('message', String(length=255), nullable=False),
Column('task_items', Integer),
Column('errors', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255)),
Column('network_id', Integer),
Column('uuid', String(length=36)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volumes = Table('volumes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
Column('mountpoint', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
Column('instance_uuid', String(length=36)),
Column('attach_time', DateTime),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_usage_cache = Table('volume_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(36), nullable=False),
Column('tot_last_refreshed', DateTime(timezone=False)),
Column('tot_reads', BigInteger(), default=0),
Column('tot_read_bytes', BigInteger(), default=0),
Column('tot_writes', BigInteger(), default=0),
Column('tot_write_bytes', BigInteger(), default=0),
Column('curr_last_refreshed', DateTime(timezone=False)),
Column('curr_reads', BigInteger(), default=0),
Column('curr_read_bytes', BigInteger(), default=0),
Column('curr_writes', BigInteger(), default=0),
Column('curr_write_bytes', BigInteger(), default=0),
Column('deleted', Integer),
Column("instance_uuid", String(length=36)),
Column("project_id", String(length=36)),
Column("user_id", String(length=36)),
Column("availability_zone", String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instances.create()
Index('project_id', instances.c.project_id).create()
Index('uuid', instances.c.uuid, unique=True).create()
# create all tables
tables = [aggregates, console_pools, instance_types,
security_groups, snapshots, volumes,
# child tables (those with foreign keys to the tables above) and the rest come later
agent_builds, aggregate_hosts, aggregate_metadata,
block_device_mapping, bw_usage_cache, cells,
certificates, compute_node_stats, compute_nodes, consoles,
dns_domains, fixed_ips, floating_ips,
instance_faults, instance_id_mappings, instance_info_caches,
instance_metadata, instance_system_metadata,
instance_type_extra_specs, instance_type_projects,
instance_actions, instance_actions_events,
groups, group_metadata, group_policy, group_member,
iscsi_targets, key_pairs, migrations, networks,
pci_devices, provider_fw_rules, quota_classes, quota_usages,
quotas, project_user_quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, snapshot_id_mappings, task_log,
virtual_interfaces,
volume_id_mappings,
volume_usage_cache]
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception(_LE('Exception while creating table.'))
raise
# task log unique constraint
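# NOTE: the unique constraint names below follow the
# 'uniq_<table>0<column>0<column>...' convention used throughout this
# migration, with '0' separating the table name and each column name.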
task_log_uc = "uniq_task_log0task_name0host0period_beginning0period_ending"
task_log_cols = ('task_name', 'host', 'period_beginning', 'period_ending')
uc = UniqueConstraint(*task_log_cols, table=task_log, name=task_log_uc)
uc.create()
# networks unique constraint
UniqueConstraint('vlan', 'deleted', table=networks,
name='uniq_networks0vlan0deleted').create()
# instance_type_name constraint
UniqueConstraint('name', 'deleted', table=instance_types,
name='uniq_instance_types0name0deleted').create()
# flavorid unique constraint
UniqueConstraint('flavorid', 'deleted', table=instance_types,
name='uniq_instance_types0flavorid0deleted').create()
# keypair constraint
UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
name='uniq_key_pairs0user_id0name0deleted').create()
# instance_type_projects constraint
inst_type_uc_name = 'uniq_instance_type_projects0instance_type_id0' + \
'project_id0deleted'
UniqueConstraint('instance_type_id', 'project_id', 'deleted',
table=instance_type_projects,
name=inst_type_uc_name).create()
# floating_ips unique constraint
UniqueConstraint('address', 'deleted',
table=floating_ips,
name='uniq_floating_ips0address0deleted').create()
# instance_info_caches
UniqueConstraint('instance_uuid',
table=instance_info_caches,
name='uniq_instance_info_caches0instance_uuid').create()
UniqueConstraint('address', 'deleted',
table=virtual_interfaces,
name='uniq_virtual_interfaces0address0deleted').create()
# cells
UniqueConstraint('name', 'deleted',
table=cells,
name='uniq_cells0name0deleted').create()
# security_groups
uc = UniqueConstraint('project_id', 'name', 'deleted',
table=security_groups,
name='uniq_security_groups0project_id0name0deleted')
uc.create()
# quotas
UniqueConstraint('project_id', 'resource', 'deleted',
table=quotas,
name='uniq_quotas0project_id0resource0deleted').create()
# fixed_ips
UniqueConstraint('address', 'deleted',
table=fixed_ips,
name='uniq_fixed_ips0address0deleted').create()
# services
UniqueConstraint('host', 'topic', 'deleted',
table=services,
name='uniq_services0host0topic0deleted').create()
UniqueConstraint('host', 'binary', 'deleted',
table=services,
name='uniq_services0host0binary0deleted').create()
# agent_builds
uc_name = 'uniq_agent_builds0hypervisor0os0architecture0deleted'
UniqueConstraint('hypervisor', 'os', 'architecture', 'deleted',
table=agent_builds,
name=uc_name).create()
uc_name = 'uniq_console_pools0host0console_type0compute_host0deleted'
UniqueConstraint('host', 'console_type', 'compute_host', 'deleted',
table=console_pools,
name=uc_name).create()
uc_name = 'uniq_aggregate_hosts0host0aggregate_id0deleted'
UniqueConstraint('host', 'aggregate_id', 'deleted',
table=aggregate_hosts,
name=uc_name).create()
uc_name = 'uniq_aggregate_metadata0aggregate_id0key0deleted'
UniqueConstraint('aggregate_id', 'key', 'deleted',
table=aggregate_metadata,
name=uc_name).create()
uc_name = 'uniq_instance_type_extra_specs0instance_type_id0key0deleted'
UniqueConstraint('instance_type_id', 'key', 'deleted',
table=instance_type_extra_specs,
name=uc_name).create()
# created first (to preserve ordering for schema diffs)
mysql_pre_indexes = [
Index('instance_type_id', instance_type_projects.c.instance_type_id),
Index('project_id', dns_domains.c.project_id),
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('usage_id', reservations.c.usage_id),
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('volume_id', block_device_mapping.c.volume_id),
Index('security_group_id',
security_group_instance_association.c.security_group_id),
]
# Common indexes (indexes we apply to all databases)
# NOTE: order specific for MySQL diff support
common_indexes = [
# aggregate_metadata
Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
# agent_builds
Index('agent_builds_hypervisor_os_arch_idx',
agent_builds.c.hypervisor,
agent_builds.c.os,
agent_builds.c.architecture),
# block_device_mapping
Index('block_device_mapping_instance_uuid_idx',
block_device_mapping.c.instance_uuid),
Index('block_device_mapping_instance_uuid_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
# NOTE(dprince): This is now a duplicate index on MySQL and needs to
# be removed there. We leave it here so the Index ordering
# matches on schema diffs (for MySQL).
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
Index(
'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
Index('block_device_mapping_instance_uuid_volume_id_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.volume_id),
# bw_usage_cache
Index('bw_usage_cache_uuid_start_period_idx',
bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
Index('certificates_project_id_deleted_idx',
certificates.c.project_id, certificates.c.deleted),
Index('certificates_user_id_deleted_idx', certificates.c.user_id,
certificates.c.deleted),
# compute_node_stats
Index('ix_compute_node_stats_compute_node_id',
compute_node_stats.c.compute_node_id),
Index('compute_node_stats_node_id_and_deleted_idx',
compute_node_stats.c.compute_node_id,
compute_node_stats.c.deleted),
# consoles
Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
# dns_domains
Index('dns_domains_domain_deleted_idx',
dns_domains.c.domain, dns_domains.c.deleted),
# fixed_ips
Index('fixed_ips_host_idx', fixed_ips.c.host),
Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id,
fixed_ips.c.host, fixed_ips.c.deleted),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
fixed_ips.c.address, fixed_ips.c.reserved,
fixed_ips.c.network_id, fixed_ips.c.deleted),
Index('fixed_ips_deleted_allocated_idx', fixed_ips.c.address,
fixed_ips.c.deleted, fixed_ips.c.allocated),
# floating_ips
Index('floating_ips_host_idx', floating_ips.c.host),
Index('floating_ips_project_id_idx', floating_ips.c.project_id),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
floating_ips.c.pool, floating_ips.c.deleted,
floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
# group_member
Index('instance_group_member_instance_idx',
group_member.c.instance_id),
# group_metadata
Index('instance_group_metadata_key_idx', group_metadata.c.key),
# group_policy
Index('instance_group_policy_policy_idx', group_policy.c.policy),
# instances
Index('instances_reservation_id_idx',
instances.c.reservation_id),
Index('instances_terminated_at_launched_at_idx',
instances.c.terminated_at,
instances.c.launched_at),
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at),
Index('instances_host_deleted_idx', instances.c.host,
instances.c.deleted),
Index('instances_uuid_deleted_idx', instances.c.uuid,
instances.c.deleted),
Index('instances_host_node_deleted_idx', instances.c.host,
instances.c.node, instances.c.deleted),
Index('instances_host_deleted_cleaned_idx',
instances.c.host, instances.c.deleted,
instances.c.cleaned),
# instance_actions
Index('instance_uuid_idx', instance_actions.c.instance_uuid),
Index('request_id_idx', instance_actions.c.request_id),
# instance_faults
Index('instance_faults_host_idx', instance_faults.c.host),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
instance_faults.c.instance_uuid, instance_faults.c.deleted,
instance_faults.c.created_at),
# instance_id_mappings
Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
# instance_metadata
Index('instance_metadata_instance_uuid_idx',
instance_metadata.c.instance_uuid),
# instance_type_extra_specs
Index('instance_type_extra_specs_instance_type_id_key_idx',
instance_type_extra_specs.c.instance_type_id,
instance_type_extra_specs.c.key),
# iscsi_targets
Index('iscsi_targets_host_idx', iscsi_targets.c.host),
Index('iscsi_targets_host_volume_id_deleted_idx',
iscsi_targets.c.host, iscsi_targets.c.volume_id,
iscsi_targets.c.deleted),
# migrations
Index('migrations_by_host_nodes_and_status_idx',
migrations.c.deleted, migrations.c.source_compute,
migrations.c.dest_compute, migrations.c.source_node,
migrations.c.dest_node, migrations.c.status),
Index('migrations_instance_uuid_and_status_idx',
migrations.c.deleted, migrations.c.instance_uuid,
migrations.c.status),
# networks
Index('networks_host_idx', networks.c.host),
Index('networks_cidr_v6_idx', networks.c.cidr_v6),
Index('networks_bridge_deleted_idx', networks.c.bridge,
networks.c.deleted),
Index('networks_project_id_deleted_idx', networks.c.project_id,
networks.c.deleted),
Index('networks_uuid_project_id_deleted_idx',
networks.c.uuid, networks.c.project_id, networks.c.deleted),
Index('networks_vlan_deleted_idx', networks.c.vlan,
networks.c.deleted),
# project_user_quotas
Index('project_user_quotas_project_id_deleted_idx',
project_user_quotas.c.project_id,
project_user_quotas.c.deleted),
Index('project_user_quotas_user_id_deleted_idx',
project_user_quotas.c.user_id, project_user_quotas.c.deleted),
# reservations
Index('ix_reservations_project_id', reservations.c.project_id),
Index('ix_reservations_user_id_deleted',
reservations.c.user_id, reservations.c.deleted),
Index('reservations_uuid_idx', reservations.c.uuid),
# security_group_instance_association
Index('security_group_instance_association_instance_uuid_idx',
security_group_instance_association.c.instance_uuid),
# task_log
Index('ix_task_log_period_beginning', task_log.c.period_beginning),
Index('ix_task_log_host', task_log.c.host),
Index('ix_task_log_period_ending', task_log.c.period_ending),
# quota_classes
Index('ix_quota_classes_class_name', quota_classes.c.class_name),
# quota_usages
Index('ix_quota_usages_project_id', quota_usages.c.project_id),
Index('ix_quota_usages_user_id_deleted',
quota_usages.c.user_id, quota_usages.c.deleted),
# volumes
Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
]
# MySQL specific indexes
if migrate_engine.name == 'mysql':
for index in mysql_pre_indexes:
index.create(migrate_engine)
# MySQL-specific index on the leftmost 100 chars of each column
# (MySQL rejects index keys that exceed its maximum key length.)
sql = ("create index migrations_by_host_nodes_and_status_idx ON "
"migrations (deleted, source_compute(100), dest_compute(100), "
"source_node(100), dest_node(100), status)")
migrate_engine.execute(sql)
# PostgreSQL specific indexes
if migrate_engine.name == 'postgresql':
Index('address', fixed_ips.c.address).create()
# NOTE(dprince): PostgreSQL doesn't allow duplicate indexes
# so we skip creation of select indexes (so schemas match exactly).
POSTGRES_INDEX_SKIPS = [
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
'block_device_mapping_instance_uuid_virtual_name_device_name_idx'
]
# NOTE(mriedem): DB2 doesn't allow duplicate indexes either.
DB2_INDEX_SKIPS = POSTGRES_INDEX_SKIPS
MYSQL_INDEX_SKIPS = [
# we create this one manually for MySQL above
'migrations_by_host_nodes_and_status_idx'
]
for index in common_indexes:
if ((migrate_engine.name == 'postgresql' and
index.name in POSTGRES_INDEX_SKIPS) or
(migrate_engine.name == 'mysql' and
index.name in MYSQL_INDEX_SKIPS) or
(migrate_engine.name == 'ibm_db_sa' and
index.name in DB2_INDEX_SKIPS)):
continue
else:
index.create(migrate_engine)
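# NOTE: '.drop' below is referenced but never called, so the statement is
# a no-op as written.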
Index('project_id', dns_domains.c.project_id).drop
# Common foreign keys
fkeys = [
[[instance_type_projects.c.instance_type_id],
[instance_types.c.id],
'instance_type_projects_ibfk_1'],
[[iscsi_targets.c.volume_id],
[volumes.c.id],
'iscsi_targets_volume_id_fkey'],
[[reservations.c.usage_id],
[quota_usages.c.id],
'reservations_ibfk_1'],
[[security_group_instance_association.c.security_group_id],
[security_groups.c.id],
'security_group_instance_association_ibfk_1'],
[[compute_node_stats.c.compute_node_id],
[compute_nodes.c.id],
'fk_compute_node_stats_compute_node_id'],
[[compute_nodes.c.service_id],
[services.c.id],
'fk_compute_nodes_service_id'],
]
# NOTE(mriedem): DB2 doesn't support unique constraints on columns that
# are nullable so we can only create foreign keys on unique constraints
# that actually exist, which excludes any FK on instances.uuid.
if migrate_engine.name != 'ibm_db_sa':
secgroup_instance_association_instance_uuid_fkey = (
'security_group_instance_association_instance_uuid_fkey')
fkeys.extend(
[
[[fixed_ips.c.instance_uuid],
[instances.c.uuid],
'fixed_ips_instance_uuid_fkey'],
[[block_device_mapping.c.instance_uuid],
[instances.c.uuid],
'block_device_mapping_instance_uuid_fkey'],
[[instance_info_caches.c.instance_uuid],
[instances.c.uuid],
'instance_info_caches_instance_uuid_fkey'],
[[instance_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_metadata_instance_uuid_fkey'],
[[instance_system_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_system_metadata_ibfk_1'],
[[security_group_instance_association.c.instance_uuid],
[instances.c.uuid],
secgroup_instance_association_instance_uuid_fkey],
[[virtual_interfaces.c.instance_uuid],
[instances.c.uuid],
'virtual_interfaces_instance_uuid_fkey'],
[[instance_actions.c.instance_uuid],
[instances.c.uuid],
'fk_instance_actions_instance_uuid'],
[[instance_faults.c.instance_uuid],
[instances.c.uuid],
'fk_instance_faults_instance_uuid'],
[[migrations.c.instance_uuid],
[instances.c.uuid],
'fk_migrations_instance_uuid']
])
for fkey_pair in fkeys:
if migrate_engine.name in ('mysql', 'ibm_db_sa'):
# For MySQL and DB2 we name our fkeys explicitly
# so they match Havana
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
# PostgreSQL names things like it wants (correct and compatible!)
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
if migrate_engine.name == 'mysql':
# In Folsom we explicitly converted migrate_version to UTF8.
migrate_engine.execute(
'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
# Set default DB charset to UTF8.
migrate_engine.execute(
'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
migrate_engine.url.database)
_create_shadow_tables(migrate_engine)
# populate initial instance types
_populate_instance_types(instance_types)
_create_dump_tables(migrate_engine)
|
apache-2.0
|
jeffmacinnes/pyneal
|
tests/pyneal_tests/test_resultsServer.py
|
1
|
2920
|
import os
from os.path import join
import sys
import socket
import json
import numpy as np
import pyneal_helper_tools as helper_tools
# get dictionary with relevant paths for tests within this module
paths = helper_tools.get_pyneal_test_paths()
if paths['pynealDir'] not in sys.path:
sys.path.insert(0, paths['pynealDir'])
socketPort = 5556
from src.resultsServer import ResultsServer
# Tests for functions within the resultsServer module
def test_resultsServer():
""" tests pyneal.src.resultsServer """
# create settings dictionary
settings = {'resultsServerPort': socketPort,
'pynealHost': '127.0.0.1',
'seriesOutputDir': paths['testDataDir'],
'launchDashboard': False}
resultsServer = ResultsServer(settings)
resultsServer.daemon = True
resultsServer.start()
# test updating the results server with results
fakeResults = np.array([1000.1, 1000.2, 1000.3])
for volIdx in range(3):
thisResult = {'testResult': fakeResults[volIdx]}
resultsServer.updateResults(volIdx, thisResult)
# test retrieving values from the results server
for volIdx in range(3):
result = resultsServer.requestLookup(volIdx)
assert result['testResult'] == fakeResults[volIdx]
# test sending a request from a remote socket connection
requestedVolIdx = 1 # vol that exists
result = fakeEndUserRequest(requestedVolIdx)
assert result['foundResults'] == True
assert result['testResult'] == fakeResults[requestedVolIdx]
requestedVolIdx = 99 # vol that doesn't exist
result = fakeEndUserRequest(requestedVolIdx)
assert result['foundResults'] == False
# test saving data
resultsServer.saveResults()
os.remove(join(paths['testDataDir'], 'results.json'))
# assuming nothing has crashed, close the socket
resultsServer.killServer()
def fakeEndUserRequest(requestedVolIdx):
""" Function to mimic the behavior of the end user, which sends a request
to the results server
Parameters
----------
volIdx : int
the volIdx of the volume you'd like to request results for
"""
# socket configs
host = '127.0.0.1' # ip of where Pyneal is running
# connect to the results server of Pyneal
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((host, socketPort))
# send request for volume number. The request must be a 4-char string
# representing the requested volume number
request = str(requestedVolIdx).zfill(4)
clientSocket.send(request.encode())
# now read the full response from the server
resp = b''
while True:
serverData = clientSocket.recv(1024)
if serverData:
resp += serverData
else:
break
clientSocket.close()
# parse the response as JSON
resp = json.loads(resp.decode())
return resp
|
mit
|
reachalpineswift/frappe-bench
|
frappe/email/email_body.py
|
8
|
8351
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.pdf import get_pdf
from frappe.email.smtp import get_outgoing_email_account
from frappe.utils import get_url, scrub_urls, strip, expand_relative_urls, cint
import email.utils
from markdown2 import markdown
def get_email(recipients, sender='', msg='', subject='[No Subject]',
text_content = None, footer=None, print_html=None, formatted=None, attachments=None,
content=None, reply_to=None, cc=()):
"""send an html email as multipart with attachments and all"""
content = content or msg
emailobj = EMail(sender, recipients, subject, reply_to=reply_to, cc=cc)
if not content.strip().startswith("<"):
content = markdown(content)
emailobj.set_html(content, text_content, footer=footer, print_html=print_html, formatted=formatted)
if isinstance(attachments, dict):
attachments = [attachments]
for attach in (attachments or []):
emailobj.add_attachment(**attach)
return emailobj
class EMail:
"""
Wrapper on the email module. Email object represents emails to be sent to the client.
Also provides a clean way to add binary `FileData` attachments
Also sets all messages as multipart/alternative for cleaner reading in text-only clients
"""
def __init__(self, sender='', recipients=(), subject='', alternative=0, reply_to=None, cc=()):
from email.mime.multipart import MIMEMultipart
from email import Charset
Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')
if isinstance(recipients, basestring):
recipients = recipients.replace(';', ',').replace('\n', '')
recipients = recipients.split(',')
# remove null
recipients = filter(None, (strip(r) for r in recipients))
self.sender = sender
self.reply_to = reply_to or sender
self.recipients = recipients
self.subject = subject
self.msg_root = MIMEMultipart('mixed')
self.msg_multipart = MIMEMultipart('alternative')
self.msg_root.attach(self.msg_multipart)
self.cc = cc or []
self.html_set = False
def set_html(self, message, text_content = None, footer=None, print_html=None, formatted=None):
"""Attach message in the html portion of multipart/alternative"""
if not formatted:
formatted = get_formatted_html(self.subject, message, footer, print_html)
# this is the first html part of a multi-part message,
# so also attach a plain-text version of the content
if not self.html_set:
if text_content:
self.set_text(expand_relative_urls(text_content))
else:
self.set_html_as_text(expand_relative_urls(formatted))
self.set_part_html(formatted)
self.html_set = True
def set_text(self, message):
"""
Attach message in the text portion of multipart/alternative
"""
from email.mime.text import MIMEText
part = MIMEText(message, 'plain', 'utf-8')
self.msg_multipart.attach(part)
def set_part_html(self, message):
from email.mime.text import MIMEText
part = MIMEText(message, 'html', 'utf-8')
self.msg_multipart.attach(part)
def set_html_as_text(self, html):
"""return html2text"""
import HTMLParser
from html2text import html2text
try:
self.set_text(html2text(html))
except HTMLParser.HTMLParseError:
pass
def set_message(self, message, mime_type='text/html', as_attachment=0, filename='attachment.html'):
"""Append the message with MIME content to the root node (as attachment)"""
from email.mime.text import MIMEText
maintype, subtype = mime_type.split('/')
part = MIMEText(message, _subtype = subtype)
if as_attachment:
part.add_header('Content-Disposition', 'attachment', filename=filename)
self.msg_root.attach(part)
def attach_file(self, n):
"""attach a file from the `FileData` table"""
from frappe.utils.file_manager import get_file
res = get_file(n)
if not res:
return
self.add_attachment(res[0], res[1])
def add_attachment(self, fname, fcontent, content_type=None):
"""add attachment"""
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
import mimetypes
if not content_type:
content_type, encoding = mimetypes.guess_type(fname)
if content_type is None:
# No guess could be made, or the file is encoded (compressed), so
# use a generic bag-of-bits type.
content_type = 'application/octet-stream'
maintype, subtype = content_type.split('/', 1)
if maintype == 'text':
# Note: we should handle calculating the charset
if isinstance(fcontent, unicode):
fcontent = fcontent.encode("utf-8")
part = MIMEText(fcontent, _subtype=subtype, _charset="utf-8")
elif maintype == 'image':
part = MIMEImage(fcontent, _subtype=subtype)
elif maintype == 'audio':
part = MIMEAudio(fcontent, _subtype=subtype)
else:
part = MIMEBase(maintype, subtype)
part.set_payload(fcontent)
# Encode the payload using Base64
from email import encoders
encoders.encode_base64(part)
# Set the filename parameter
if fname:
part.add_header(b'Content-Disposition',
("attachment; filename=\"%s\"" % fname).encode('utf-8'))
self.msg_root.attach(part)
def add_pdf_attachment(self, name, html, options=None):
self.add_attachment(name, get_pdf(html, options), 'application/octet-stream')
def get_default_sender(self):
email_account = get_outgoing_email_account()
return email.utils.formataddr((email_account.name, email_account.get("sender") or email_account.get("email_id")))
def validate(self):
"""validate the email ids"""
from frappe.utils import validate_email_add
if not self.sender:
self.sender = self.get_default_sender()
validate_email_add(strip(self.sender), True)
self.reply_to = validate_email_add(strip(self.reply_to) or self.sender, True)
self.recipients = [strip(r) for r in self.recipients]
self.cc = [strip(r) for r in self.cc]
for e in self.recipients + (self.cc or []):
validate_email_add(e, True)
def set_message_id(self, message_id):
self.msg_root["Message-Id"] = "<{0}@{1}>".format(message_id, frappe.local.site)
def make(self):
"""build into msg_root"""
headers = {
"Subject": strip(self.subject).encode("utf-8"),
"From": self.sender.encode("utf-8"),
"To": ', '.join(self.recipients).encode("utf-8"),
"Date": email.utils.formatdate(),
"Reply-To": self.reply_to.encode("utf-8") if self.reply_to else None,
"CC": ', '.join(self.cc).encode("utf-8") if self.cc else None,
b'X-Frappe-Site': get_url().encode('utf-8'),
}
# reset headers as values may be changed.
for key, val in headers.iteritems():
if self.msg_root.has_key(key):
del self.msg_root[key]
self.msg_root[key] = val
# call hook to enable apps to modify msg_root before sending
for hook in frappe.get_hooks("make_email_body_message"):
frappe.get_attr(hook)(self)
def as_string(self):
"""validate, build message and convert to string"""
self.validate()
self.make()
return self.msg_root.as_string()
def get_formatted_html(subject, message, footer=None, print_html=None):
# imported here to avoid cyclic import
message = scrub_urls(message)
email_account = get_outgoing_email_account(False)
rendered_email = frappe.get_template("templates/emails/standard.html").render({
"content": message,
"signature": get_signature(email_account),
"footer": get_footer(email_account, footer),
"title": subject,
"print_html": print_html,
"subject": subject
})
return rendered_email
def get_signature(email_account):
if email_account and email_account.add_signature and email_account.signature:
return "<br><br>" + email_account.signature
else:
return ""
def get_footer(email_account, footer=None):
"""append a footer (signature)"""
footer = footer or ""
if email_account and email_account.footer:
footer += email_account.footer
footer += "<!--unsubscribe link here-->"
company_address = frappe.db.get_default("email_footer_address")
if company_address:
footer += '<div style="text-align: center; color: #8d99a6">{0}</div>'\
.format(company_address.replace("\n", "<br>"))
if not cint(frappe.db.get_default("disable_standard_email_footer")):
for default_mail_footer in frappe.get_hooks("default_mail_footer"):
footer += default_mail_footer
return footer
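# Example usage (illustrative sketch only; it assumes a configured Frappe
# site and outgoing email account, and the addresses below are placeholders):
#
#   email = get_email(['[email protected]'], sender='[email protected]',
#                     content='<p>Hello</p>', subject='Greetings')
#   raw_mime = email.as_string()  # validates, builds headers, serializes MIME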
|
mit
|
silentfuzzle/calibre
|
src/calibre/gui2/store/stores/nexto_plugin.py
|
15
|
3905
|
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 3 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011-2013, Tomasz Długosz <[email protected]>'
__docformat__ = 'restructuredtext en'
import re
import urllib
from contextlib import closing
from lxml import html
from PyQt5.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class NextoStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
pid = '155711'
url = 'http://www.nexto.pl/ebooki_c1015.xml'
detail_url = None
if detail_item:
book_id = re.search(r'p[0-9]*\.xml\Z', detail_item)
book_id = book_id.group(0).replace('.xml','').replace('p','')
if book_id:
detail_url = 'http://www.nexto.pl/rf/pr?p=' + book_id + '&pid=' + pid
if external or self.config.get('open_external', False):
open_url(QUrl(url_slash_cleaner(detail_url if detail_url else url)))
else:
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.nexto.pl/szukaj.xml?search-clause=' + urllib.quote_plus(query) + '&scid=1015'
br = browser()
offset=0
counter = max_results
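# page through the result list 10 items at a time via the '_offset' query
# parameter, stopping once max_results items have been yielded or the
# listing has no "next" link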
while counter:
with closing(br.open(url + '&_offset=' + str(offset), timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//ul[@class="productslist"]/li'):
if counter <= 0:
break
id = ''.join(data.xpath('.//div[@class="cover_container"]/a[1]/@href'))
if not id:
continue
price = ''.join(data.xpath('.//strong[@class="nprice"]/text()'))
cover_url = ''.join(data.xpath('.//img[@class="cover"]/@src'))
cover_url = re.sub(r'%2F', '/', cover_url)
cover_url = re.sub(r'widthMax=120&heightMax=200', 'widthMax=64&heightMax=64', cover_url)
title = ''.join(data.xpath('.//a[@class="title"]/text()'))
title = re.sub(r' - ebook$', '', title)
formats = ', '.join(data.xpath('.//ul[@class="formats_available"]/li//b/text()'))
DrmFree = re.search(r'znak', formats)
formats = re.sub(r'\ ?\(.+?\)', '', formats)
author = ''
with closing(br.open('http://www.nexto.pl/' + id.strip(), timeout=timeout/4)) as nf:
idata = html.fromstring(nf.read())
author = ', '.join(idata.xpath('//div[@class="basic_data"]/p[1]/b/a/text()'))
counter -= 1
s = SearchResult()
s.cover_url = cover_url if cover_url[:4] == 'http' else 'http://www.nexto.pl' + cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price
s.detail_item = id.strip()
s.drm = SearchResult.DRM_UNLOCKED if DrmFree else SearchResult.DRM_LOCKED
s.formats = formats.upper().strip()
yield s
if not doc.xpath('//div[@class="listnavigator"]//a[@class="next"]'):
break
offset+=10
|
gpl-3.0
|
lfz/Guided-Denoise
|
Attackset/fgsm_ensv3_resv2_inresv2_random/nets/resnet_v1.py
|
33
|
15274
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
slim = tf.contrib.slim
@slim.add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None,
use_bounded_activations=False):
"""Bottleneck residual unit variant with BN after convolutions.
This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
its definition. Note that we use here the bottleneck variant which has an
extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
use_bounded_activations: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
Returns:
The ResNet unit's output.
"""
with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = slim.conv2d(
inputs,
depth, [1, 1],
stride=stride,
activation_fn=tf.nn.relu6 if use_bounded_activations else None,
scope='shortcut')
residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
scope='conv1')
residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
rate=rate, scope='conv2')
residual = slim.conv2d(residual, depth, [1, 1], stride=1,
activation_fn=None, scope='conv3')
if use_bounded_activations:
# Use clip_by_value to simulate bandpass activation.
residual = tf.clip_by_value(residual, -6.0, 6.0)
output = tf.nn.relu6(shortcut + residual)
else:
output = tf.nn.relu(shortcut + residual)
return slim.utils.collect_named_outputs(outputs_collections,
sc.original_name_scope,
output)
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether is training or not.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.name + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
if num_classes is not None:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if num_classes is not None:
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v1 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v1 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
def resnet_v1_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_50'):
"""ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_101'):
"""ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_152'):
"""ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope='resnet_v1_200'):
"""ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
blocks = [
resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v1(inputs, blocks, num_classes, is_training,
global_pool=global_pool, output_stride=output_stride,
include_root_block=True, spatial_squeeze=spatial_squeeze,
reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
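# Illustrative sketch (mirrors the module docstring's "Typical use"; `inputs`
# is assumed to be an existing [batch, height, width, 3] float tensor):
#
#   with slim.arg_scope(resnet_arg_scope()):
#       # fully-convolutional mode for dense prediction
#       net, end_points = resnet_v1_50(inputs, num_classes=None,
#                                      global_pool=False, output_stride=16)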
|
apache-2.0
|
studio666/gnuradio
|
gr-digital/examples/gen_whitener.py
|
58
|
1814
|
#!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru
from gnuradio import blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
class my_graph(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
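# flowgraph: 32k-bit LFSR source -> head (2048 shorts) -> vector sink;
# __main__ below prints the collected samples as comma-separated bytes,
# 16 per line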
src = blocks.lfsr_32k_source_s()
head = blocks.head(gr.sizeof_short, 2048)
self.dst = blocks.vector_sink_s()
self.connect(src, head, self.dst)
if __name__ == '__main__':
try:
tb = my_graph()
tb.run()
f = sys.stdout
i = 0
for s in tb.dst.data():
f.write("%3d, " % (s & 0xff,))
f.write("%3d, " % ((s >> 8) & 0xff,))
i = i+2
if i % 16 == 0:
f.write('\n')
except KeyboardInterrupt:
pass
|
gpl-3.0
|
stpx/canto-curses
|
plugins/theme-default.py
|
2
|
2716
|
# Canto Default Theme
# Defined as a plugin to use as a base for other themes.
FORCE_COLORS = False
FORCE_STYLE = False
from canto_next.plugins import check_program
check_program("canto-curses")
from canto_curses.story import StoryPlugin
from canto_curses.tag import TagPlugin
from canto_curses.theme import prep_for_display
from canto_curses.color import cc
cmds = []
if FORCE_COLORS:
cmds.append("reset-config color")
if FORCE_STYLE:
cmds.append("reset-config style")
class CantoThemeStoryDefault(StoryPlugin):
def __init__(self, story):
self.story = story
self.plugin_attrs = { "eval" : self.eval }
def eval(self):
story = self.story
s = ""
if "read" in story.content["canto-state"]:
s += cc("read")
else:
s += cc("unread")
if story.marked:
s += cc("marked") + "[*]"
if story.selected:
s += cc("selected")
s += prep_for_display(story.content["title"])
if story.selected:
s += cc.end("selected")
if story.marked:
s += cc.end("marked")
if "read" in story.content["canto-state"]:
s += cc.end("read")
else:
s += cc.end("unread")
return s
class CantoThemeTagDefault(TagPlugin):
def __init__(self, tag):
self.tag = tag
self.plugin_attrs = { "eval" : self.eval }
def eval(self):
tag = self.tag
# Make sure to strip out the category from category:name
str_tag = tag.tag.split(':', 1)[1]
unread = len([s for s in tag\
if "canto-state" not in s.content or\
"read" not in s.content["canto-state"]])
s = ""
if tag.selected:
s += cc("selected")
if tag.collapsed:
s += "[+]"
else:
s += "[-]"
s += " " + str_tag + " "
s += "[" + cc("unread") + str(unread) + cc.end("unread") + "]"
if tag.updates_pending:
s += " [" + cc("pending") + str(tag.updates_pending) + cc.end("pending") + "]"
if tag.selected:
s += cc.end("selected")
return s
# Stolen from autocmd.py, but simple enough to copy instead of introducing a
# dependency.
from canto_curses.gui import GuiPlugin
from canto_next.hooks import on_hook
class AutoCmdGui(GuiPlugin):
def __init__(self, gui):
self.plugin_attrs = {}
self.gui = gui
on_hook("curses_start", self.do_cmds)
def do_cmds(self):
self.gui.callbacks["set_var"]("quiet", True)
for cmd in cmds:
self.gui.issue_cmd(cmd)
self.gui.callbacks["set_var"]("quiet", False)
|
gpl-2.0
|
jonge-democraten/website
|
create_local_settings.py
|
1
|
1573
|
#!/usr/bin/env python3
import codecs
import os
import random
import shutil
import string
import tempfile
LOCAL_SETTINGS_PATH = './website/local_settings.py'
LOCAL_SETTINGS_EXAMPLE_PATH = './website/local_settings_example.py'
def main():
if os.path.exists(LOCAL_SETTINGS_PATH):
print('ERROR: ' + LOCAL_SETTINGS_PATH +
' already exists! Please remove this file manually if you intend to overwrite it.')
return
shutil.copyfile(LOCAL_SETTINGS_EXAMPLE_PATH, LOCAL_SETTINGS_PATH)
secret_key_random = generate_random_secret_key()
replace(LOCAL_SETTINGS_PATH, "SECRET_KEY = ''", "SECRET_KEY = '" + secret_key_random + "'")
def replace(source_file_path, pattern, substring):
fh, target_file_path = tempfile.mkstemp()
os.close(fh)  # close the raw descriptor; the path is reopened with codecs below
with codecs.open(target_file_path, 'w', 'utf-8') as target_file:
with codecs.open(source_file_path, 'r', 'utf-8') as source_file:
for line in source_file:
target_file.write(line.replace(pattern, substring))
os.remove(source_file_path)
shutil.copy(target_file_path, source_file_path)
def generate_random_secret_key():
# source: https://gist.github.com/mattseymour/9205591
# Get ASCII letters, digits and punctuation (minus quote and backslash characters, as they could terminate the string).
chars = ''.join([string.ascii_letters, string.digits, string.punctuation]).replace('\'', '').replace('"', '').replace('\\', '')
secret_key = ''.join([random.SystemRandom().choice(chars) for i in range(50)])
return secret_key
if __name__ == "__main__":
main()
|
mit
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/django/utils/lru_cache.py
|
94
|
7648
|
try:
from functools import lru_cache
except ImportError:
# backport of Python's 3.3 lru_cache, written by Raymond Hettinger and
# licensed under MIT license, from:
# <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/>
# Should be removed when Django only supports Python 3.2 and above.
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
oldvalue = root[RESULT]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
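# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the decorator defined (or re-exported) above; the
# function name `fib` and the maxsize value are illustrative only.
#
#     from django.utils.lru_cache import lru_cache
#
#     @lru_cache(maxsize=128)
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)
#     fib.cache_info()    # CacheInfo(hits=..., misses=..., maxsize=128, currsize=...)
#     fib.cache_clear()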
|
mit
|
hfp/tensorflow-xsmm
|
tensorflow/compiler/tests/xla_ops_test.py
|
10
|
12658
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA op wrappers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.compiler.xla import xla_data_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class XlaOpsTest(xla_test.XLATestCase, parameterized.TestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected,
equality_fn=None):
with self.test_session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
result = session.run(output, feeds)
if not equality_fn:
equality_fn = self.assertAllClose
equality_fn(result, expected, rtol=1e-3)
def testAdd(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.add,
args=(np.array([1, 2, 3], dtype=dtype),
np.array([4, 5, 6], dtype=dtype)),
expected=np.array([5, 7, 9], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(0,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 9], [14, 15]], dtype=dtype))
self._assertOpOutputMatchesExpected(
lambda x, y: xla.add(x, y, broadcast_dims=(1,)),
args=(np.array([[1, 2], [3, 4]], dtype=dtype),
np.array([7, 11], dtype=dtype)),
expected=np.array([[8, 13], [10, 15]], dtype=dtype))
def testBroadcast(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.broadcast(x, (7, 42)),
args=(v,),
expected=np.tile(v, (7, 42, 1, 1)))
def testShiftRightLogical(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_logical,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0x0FFFFFFF, 1], dtype=np.uint32))
def testShiftRightArithmetic(self):
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([-1, 16], dtype=np.int32), np.int32(4)),
expected=np.array([-1, 1], dtype=np.int32))
self._assertOpOutputMatchesExpected(
xla.shift_right_arithmetic,
args=(np.array([0xFFFFFFFF, 16], dtype=np.uint32), np.uint32(4)),
expected=np.array([0xFFFFFFFF, 1], dtype=np.uint32))
PRECISION_VALUES = (None, xla_data_pb2.PrecisionConfig.DEFAULT,
xla_data_pb2.PrecisionConfig.HIGH,
xla_data_pb2.PrecisionConfig.HIGHEST)
@parameterized.parameters(*PRECISION_VALUES)
def testConv(self, precision):
for dtype in set(self.float_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
def conv_1d_fn(lhs, rhs):
dnums = xla_data_pb2.ConvolutionDimensionNumbers()
num_spatial_dims = 1
dnums.input_batch_dimension = 0
dnums.input_feature_dimension = 1
dnums.output_batch_dimension = 0
dnums.output_feature_dimension = 1
dnums.kernel_output_feature_dimension = 0
dnums.kernel_input_feature_dimension = 1
dnums.input_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.kernel_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
dnums.output_spatial_dimensions.extend(range(2, 2 + num_spatial_dims))
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.conv(
lhs,
rhs,
window_strides=(1,),
padding=((2, 1),),
lhs_dilation=(1,),
rhs_dilation=(2,),
dimension_numbers=dnums)
self._assertOpOutputMatchesExpected(
conv_1d_fn,
args=(
np.array([[[3, 4, 5, 6]]], dtype=dtype),
np.array([[[-2, -3]]], dtype=dtype),
),
expected=np.array([[[-9, -12, -21, -26, -10]]], dtype=dtype))
@parameterized.parameters(*PRECISION_VALUES)
def testDotGeneral(self, precision):
for dtype in self.float_types:
def dot_fn(lhs, rhs):
dnums = xla_data_pb2.DotDimensionNumbers()
dnums.lhs_contracting_dimensions.append(2)
dnums.rhs_contracting_dimensions.append(1)
dnums.lhs_batch_dimensions.append(0)
dnums.rhs_batch_dimensions.append(0)
precision_config = None
if precision:
precision_config = xla_data_pb2.PrecisionConfig()
precision_config.operand_precision.extend([precision, precision])
return xla.dot_general(
lhs,
rhs,
dimension_numbers=dnums,
precision_config=precision_config)
lhs = np.array(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
], dtype=dtype)
rhs = np.array(
[
[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]],
], dtype=dtype)
self._assertOpOutputMatchesExpected(
dot_fn,
args=(lhs, rhs),
expected=np.array(
[
[[9, 12, 15], [19, 26, 33]],
[[95, 106, 117], [129, 144, 159]],
],
dtype=dtype))
def testNeg(self):
for dtype in self.numeric_types - {np.uint8, np.int8}:
self._assertOpOutputMatchesExpected(
xla.neg,
args=(np.array([1, 2, 3], dtype=dtype),),
expected=np.array([-1, -2, -3], dtype=dtype))
def testPad(self):
for dtype in self.numeric_types:
def pad_fn(x):
return xla.pad(
x,
padding_value=7,
padding_low=[2, 1],
padding_high=[1, 2],
padding_interior=[1, 0])
self._assertOpOutputMatchesExpected(
pad_fn,
args=(np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2]),),
expected=np.array(
[[7, 7, 7, 7, 7], [7, 7, 7, 7, 7], [7, 0, 1, 7, 7],
[7, 7, 7, 7, 7], [7, 2, 3, 7, 7], [7, 7, 7, 7, 7]],
dtype=dtype))
def testReduce(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def sum_reducer(x, y):
return x + y
def sum_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=0, dimensions_to_reduce=dims, reducer=sum_reducer)
return fn
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([12, 15, 18, 21], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([6, 22, 38], dtype=dtype))
self._assertOpOutputMatchesExpected(
sum_reduction(dims=[0, 1]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=dtype(66))
@function.Defun(dtype, dtype)
def mul_reducer(x, y):
return x * y
def mul_reduction(dims):
def fn(x):
return xla.reduce(
x, init_value=1, dimensions_to_reduce=dims, reducer=mul_reducer)
return fn
self._assertOpOutputMatchesExpected(
mul_reduction(dims=[0]),
args=(np.arange(12, dtype=np.int32).astype(dtype).reshape([3, 4]),),
expected=np.array([0, 45, 120, 231], dtype=dtype))
def testSelectAndScatter(self):
for dtype in set(self.numeric_types).intersection(
set([dtypes.bfloat16.as_numpy_dtype, np.float32])):
@function.Defun(dtype, dtype)
def add_scatter(x, y):
return x + y
@function.Defun(dtype, dtype)
def ge_select(x, y):
return x >= y
def test_fn(operand, source):
return xla.select_and_scatter(
operand,
window_dimensions=[2, 3, 1, 1],
window_strides=[2, 2, 1, 1],
padding=[[0, 0]] * 4,
source=source,
init_value=0,
select=ge_select,
scatter=add_scatter)
self._assertOpOutputMatchesExpected(
test_fn,
args=(np.array(
[[7, 2, 5, 3, 8], [3, 8, 9, 3, 4], [1, 5, 7, 5, 6],
[0, 6, 2, 10, 2]],
dtype=dtype).reshape((4, 5, 1, 1)),
np.array([[2, 6], [3, 1]], dtype=dtype).reshape((2, 2, 1, 1))),
expected=np.array(
[[0, 0, 0, 0, 0], [0, 0, 8, 0, 0], [0, 0, 3, 0, 0],
[0, 0, 0, 1, 0]],
dtype=dtype).reshape((4, 5, 1, 1)))
def testTranspose(self):
for dtype in self.numeric_types:
v = np.arange(4, dtype=np.int32).astype(dtype).reshape([2, 2])
self._assertOpOutputMatchesExpected(
lambda x: xla.transpose(x, [1, 0]), args=(v,), expected=v.T)
def testDynamicSlice(self):
for dtype in self.numeric_types:
self._assertOpOutputMatchesExpected(
xla.dynamic_slice,
args=(np.arange(1000,
dtype=np.int32).astype(dtype).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3, 2])),
expected=np.array(
np.array([[[573, 574], [583, 584], [593, 594]],
[[673, 674], [683, 684], [693, 694]]]),
dtype=dtype))
def testDynamicSliceWithIncorrectStartIndicesShape(self):
with self.test_session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7]), np.array([2, 3, 4]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'start_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and start_indices has shape \[2\].*'))
def testDynamicSliceWithIncorrectSizeIndicesShape(self):
with self.test_session() as session:
with self.test_scope():
output = xla.dynamic_slice(
np.arange(1000, dtype=np.int32).reshape([10, 10, 10]),
np.array([5, 7, 3]), np.array([2, 3]))
with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
session.run(output)
self.assertRegexpMatches(
invalid_arg_error.exception.message,
(r'size_indices must be a vector with length equal to input rank, '
r'but input rank is 3 and size_indices has shape \[2\].*'))
if __name__ == '__main__':
googletest.main()
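# --- Hedged note (not part of the original test file) ---
# These tests are normally driven through the XLA test infrastructure rather
# than invoked directly; a plausible invocation (exact Bazel target name
# assumed) is:
#
#     bazel test //tensorflow/compiler/tests:xla_ops_test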
|
apache-2.0
|
adeverteuil/backup
|
backup/test/test_snapshot.py
|
1
|
10125
|
# Alexandre's backup script
# Copyright © 2014 Alexandre A. de Verteuil
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import io
import queue
import threading
import unittest
from .basic_setup import BasicSetup
from ..snapshot import *
from ..locking import *
from ..dry_run import if_not_dry_run
class TestSnapshot(BasicSetup):
def test_init(self):
now = datetime.datetime.now()
s = Snapshot(self.testdest, "interval")
self.assertIsNone(s.timestamp)
self.assertTrue(s.path.startswith(self.testdest))
self.assertEqual(s.interval, "interval")
self.assertTrue(s.path.endswith(".wip"), msg=s.path)
def test_set_timestamp(self):
t = datetime.datetime(2014, 7, 1, 10, 10)
s = Snapshot(self.testdest, "interval")
s.timestamp = t
self.assertEqual(s.timestamp, t)
self.assertTrue(s.path.endswith("2014-07-01T10:10"), msg=s.path)
# Now test renaming directory.
s = Snapshot(self.testdest, "interval")
s.mkdir()
with s:
s.status = Status.syncing
self.assertEqual(
sorted(os.listdir(self.testdest)),
[".interval.wip.lock", ".interval.wip.status", "interval.wip"]
)
s.timestamp = t
self.assertEqual(
sorted(os.listdir(self.testdest)),
[
".interval.2014-07-01T10:10.lock",
".interval.2014-07-01T10:10.status",
"interval.2014-07-01T10:10",
]
)
def test_set_interval(self):
t = datetime.datetime(2014, 7, 1, 10, 10)
s = Snapshot(self.testdest, "hourly")
s.interval = "daily"
s.timestamp = t
self.assertEqual(s.interval, "daily")
self.assertTrue(s.path.endswith("daily.2014-07-01T10:10"), msg=s.path)
# Now test renaming directory.
s = Snapshot(self.testdest, "hourly")
s.mkdir()
with s:
s.timestamp = t
self.assertEqual(
sorted(os.listdir(self.testdest)),
[".hourly.2014-07-01T10:10.lock", "hourly.2014-07-01T10:10"]
)
s.interval = "daily"
self.assertEqual(
sorted(os.listdir(self.testdest)),
[
".daily.2014-07-01T10:10.lock",
"daily.2014-07-01T10:10",
]
)
def test_snapshot_from_index(self):
os.chdir(self.testdest)
for d in range(1, 5):
os.mkdir("daily.2014-07-{:02}T00:00".format(d))
# Create irrelevant directories to make sure they don't cause trouble.
os.mkdir("hourly.2014-07-01T00:00")
os.mkdir("some_random_directory")
for d in range(4):
s = Snapshot.from_index(self.testdest, "daily", d)
self.assertEqual(s.timestamp, datetime.datetime(2014, 7, 4-d))
with self.assertRaises(IndexError):
s = Snapshot.from_index(self.testdest, "daily", 4)
s = Snapshot.from_index(self.testdest, "daily", -1)
self.assertEqual(s.timestamp, datetime.datetime(2014, 7, 1))
def test_snapshot_from_path(self):
os.chdir(self.testdest)
os.mkdir("daily.2014-07-01T00:00")
os.mkdir("daily.wip")
s = Snapshot.from_path(os.path.join(self.testdest, "daily.wip"))
self.assertIsNone(s.timestamp)
self.assertEqual(
s.path,
os.path.join(self.testdest, "daily.wip")
)
s = Snapshot.from_path(
os.path.join(self.testdest, "daily.2014-07-01T00:00")
)
self.assertEqual(s.timestamp, datetime.datetime(2014, 7, 1))
self.assertEqual(
s.path,
os.path.join(self.testdest, "daily.2014-07-01T00:00")
)
def test_locking(self):
# Setup
s1 = Snapshot(self.testdest, "interval")
s1.mkdir()
s1.acquire()
# Test existence of lock file.
self.assertTrue(os.access(s1.lockfile, os.R_OK), msg=s1.lockfile)
# Test acquiring the lock with another snapshot object.
with self.assertRaises(AlreadyLocked):
s2 = Snapshot.from_index(self.testdest, "interval", 0)
s2.acquire()
# Attempt to acquire the lock a second time from another thread.
def grab_lock(lockable, q):
try:
lockable.acquire()
except Exception as err:
q.put(err)
else:
q.put(None)
with self.assertRaises(AlreadyLocked):
# do the test here.
q = queue.Queue()
t = threading.Thread(
target=grab_lock,
args=(s1, q),
)
t.start()
err = q.get()
if err is not None:
raise err
# Test breaking a lock.
self.assertTrue(s2.is_locked())
self.assertFalse(s2.i_am_locking())
with self.assertRaises(NotMyLock):
s2.release()
s2.break_lock()
self.assertFalse(s2.is_locked())
# Test context manager.
with s2:
with self.assertRaises(AlreadyLocked):
s1.acquire()
# Test releasing an unlocked lock.
with self.assertRaises(AlreadyUnlocked):
s2.release()
def test_status_cycle(self):
# void -> blank -> syncing -> complete -> deleting -> void
s = Snapshot(self.testdest, "interval")
self.assertEqual(s.status, Status.void)
s.mkdir()
self.assertEqual(s.status, Status.blank)
s.status = Status.syncing
self.assertEqual(s.status, Status.syncing)
s.status = Status.complete
self.assertEqual(s.status, Status.complete)
s.status = Status.deleting
self.assertEqual(s.status, Status.deleting)
s.status = Status.deleted
# Changing the status of a deleted snapshot should raise an exception.
statuses = "void blank syncing flagged complete deleting"
for status in (statuses.split(" ")):
with self.assertRaises(RuntimeError):
s.status = Status[status]
def test_status_flagged(self):
s = Snapshot(self.testdest, "interval")
s.mkdir()
s.status = Status.syncing
self.assertEqual(s.status, Status.syncing)
s.status = Status.flagged
self.assertEqual(s.status, Status.flagged)
s.status = Status.syncing
self.assertEqual(s.status, Status.flagged)
s.status = Status.deleting
self.assertEqual(s.status, Status.deleting)
s.status = Status.deleted
self.assertEqual(s.status, Status.deleted)
with self.assertRaises(RuntimeError):
s.status = Status.flagged
def test_delete(self):
# Try to delete a void snapshot.
s = Snapshot(self.testdest, "interval")
with self.assertRaises(RuntimeError):
with s:
s.delete()
s.mkdir()
with s:
s.delete()
def test_existing_dirty_snapshot(self):
# If a snapshot is instantiated for which a dirty directory exists,
# Snapshot should set the proper status.
# Create 4 snapshot directories:
        # 2014-07-04 -- Status.blank
        # 2014-07-03 -- Status.syncing
        # 2014-07-02 -- Status.complete
        # 2014-07-01 -- Status.deleting
os.chdir(self.testdest)
os.mkdir("daily.2014-07-04T00:00")
os.mkdir("daily.2014-07-03T00:00")
with open(".daily.2014-07-03T00:00.status", "w") as f:
f.write(str(Status.syncing.value))
os.mkdir("daily.2014-07-02T00:00")
open("daily.2014-07-02T00:00/a", "wb").close()
os.mkdir("daily.2014-07-01T00:00")
with open(".daily.2014-07-01T00:00.status", "w") as f:
f.write(str(Status.deleting.value))
snapshots = []
for d in range(4):
snapshots.append(Snapshot.from_index(self.testdest, "daily", d))
self.assertEqual(snapshots[0].status, Status.blank)
self.assertEqual(snapshots[1].status, Status.syncing)
self.assertEqual(snapshots[2].status, Status.complete)
self.assertEqual(snapshots[3].status, Status.deleting)
def test_dry_run(self):
# Directory must not be created.
s = Snapshot(self.testdest, "interval", datetime.datetime(2014, 7, 1))
if_not_dry_run.dry_run = True
s.mkdir()
s.status = Status.syncing
self.assertEqual(os.listdir(self.testdest), [])
# Reset
if_not_dry_run.dry_run = False
s = Snapshot(self.testdest, "interval", datetime.datetime(2014, 7, 1))
s.mkdir()
# Directory must not be deleted.
if_not_dry_run.dry_run = True
s.delete()
self.assertEqual(
os.listdir(self.testdest),
["interval.2014-07-01T00:00"],
)
# Reset
s._status = Status.blank
# Directory must not be renamed.
s.timestamp = datetime.datetime(2014, 7, 2)
self.assertEqual(
os.listdir(self.testdest),
["interval.2014-07-01T00:00"],
)
# But the internal timestamp must still be changed.
self.assertEqual(
s.timestamp,
datetime.datetime(2014, 7, 2),
)
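# --- Hedged note (not part of the original test module) ---
# Assuming the package layout implied by the relative imports above
# (backup/test/test_snapshot.py alongside basic_setup.py), the suite can be
# run from the project root with the standard unittest runner:
#
#     python3 -m unittest backup.test.test_snapshot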
|
gpl-3.0
|
emil-mi/exercises-in-programming-style
|
32-trinity/tf-32-reactive.py
|
17
|
1545
|
#!/usr/bin/env python
import sys, re, operator, collections
class WordFrequenciesModel:
""" Models the data. In this case, we're only interested
in words and their frequencies as an end result """
freqs = {}
def __init__(self):
self._observers = []
def register(self, obs):
self._observers.append(obs)
def update(self, path_to_file):
try:
stopwords = set(open('../stop_words.txt').read().split(','))
words = re.findall('[a-z]{2,}', open(path_to_file).read().lower())
self.freqs = collections.Counter(w for w in words if w not in stopwords)
for obs in self._observers:
obs.render()
except IOError:
print "File not found"
self.freqs = {}
class WordFrequenciesView:
def __init__(self, model):
self._model = model
model.register(self)
def render(self):
sorted_freqs = sorted(self._model.freqs.iteritems(), key=operator.itemgetter(1), reverse=True)
for (w, c) in sorted_freqs[:25]:
print w, '-', c
class WordFrequencyController:
def __init__(self, model, view):
self._model, self._view = model, view
def run(self):
self._model.update(sys.argv[1])
while True:
print "Next file: "
sys.stdout.flush()
filename = sys.stdin.readline().strip()
self._model.update(filename)
m = WordFrequenciesModel()
v = WordFrequenciesView(m)
c = WordFrequencyController(m, v)
c.run()
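# --- Hedged sketch (not part of the original program) ---
# In the observer arrangement above, any extra view only needs a render()
# method and a model.register() call; the class below is illustrative only
# (Python 2 syntax, to match the rest of the file) and would have to be
# constructed before c.run().
#
#     class WordCountView:
#         def __init__(self, model):
#             self._model = model
#             model.register(self)
#         def render(self):
#             print "distinct non-stop words:", len(self._model.freqs)
#
#     v2 = WordCountView(m)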
|
mit
|
naparuba/kunai
|
data/global-configuration/packs/mongodb/collectors/pymongo/bulk.py
|
17
|
25864
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The bulk write operations interface.
.. versionadded:: 2.7
"""
from bson.objectid import ObjectId
from bson.raw_bson import RawBSONDocument
from bson.son import SON
from pymongo.common import (validate_is_mapping,
validate_is_document_type,
validate_ok_for_replace,
validate_ok_for_update)
from pymongo.collation import validate_collation_or_none
from pymongo.errors import (BulkWriteError,
ConfigurationError,
DocumentTooLarge,
InvalidOperation,
OperationFailure)
from pymongo.message import (_INSERT, _UPDATE, _DELETE,
_do_batched_write_command,
_randint,
_BulkWriteContext)
from pymongo.write_concern import WriteConcern
_DELETE_ALL = 0
_DELETE_ONE = 1
# For backwards compatibility. See MongoDB src/mongo/base/error_codes.err
_BAD_VALUE = 2
_UNKNOWN_ERROR = 8
_WRITE_CONCERN_ERROR = 64
_COMMANDS = ('insert', 'update', 'delete')
# These string literals are used when we create fake server return
# documents client side. We use unicode literals in python 2.x to
# match the actual return values from the server.
_UID = u"_id"
_UCODE = u"code"
_UERRMSG = u"errmsg"
_UINDEX = u"index"
_UOP = u"op"
class _Run(object):
"""Represents a batch of write operations.
"""
def __init__(self, op_type):
"""Initialize a new Run object.
"""
self.op_type = op_type
self.index_map = []
self.ops = []
def index(self, idx):
"""Get the original index of an operation in this run.
:Parameters:
- `idx`: The Run index that maps to the original index.
"""
return self.index_map[idx]
def add(self, original_index, operation):
"""Add an operation to this Run instance.
:Parameters:
- `original_index`: The original index of this operation
within a larger bulk operation.
- `operation`: The operation document.
"""
self.index_map.append(original_index)
self.ops.append(operation)
def _make_error(index, code, errmsg, operation):
"""Create and return an error document.
"""
return {
_UINDEX: index,
_UCODE: code,
_UERRMSG: errmsg,
_UOP: operation
}
def _merge_legacy(run, full_result, result, index):
"""Merge a result from a legacy opcode into the full results.
"""
affected = result.get('n', 0)
errmsg = result.get("errmsg", result.get("err", ""))
if errmsg:
# wtimeout is not considered a hard failure in
# MongoDB 2.6 so don't treat it like one here.
if result.get("wtimeout"):
error_doc = {'errmsg': errmsg, 'code': _WRITE_CONCERN_ERROR}
full_result['writeConcernErrors'].append(error_doc)
else:
code = result.get("code", _UNKNOWN_ERROR)
error = _make_error(run.index(index), code, errmsg, run.ops[index])
if "errInfo" in result:
error["errInfo"] = result["errInfo"]
full_result["writeErrors"].append(error)
return
if run.op_type == _INSERT:
full_result['nInserted'] += 1
elif run.op_type == _UPDATE:
if "upserted" in result:
doc = {_UINDEX: run.index(index), _UID: result["upserted"]}
full_result["upserted"].append(doc)
full_result['nUpserted'] += affected
# Versions of MongoDB before 2.6 don't return the _id for an
# upsert if _id is not an ObjectId.
elif result.get("updatedExisting") is False and affected == 1:
op = run.ops[index]
# If _id is in both the update document *and* the query spec
# the update document _id takes precedence.
_id = op['u'].get('_id', op['q'].get('_id'))
doc = {_UINDEX: run.index(index), _UID: _id}
full_result["upserted"].append(doc)
full_result['nUpserted'] += affected
else:
full_result['nMatched'] += affected
elif run.op_type == _DELETE:
full_result['nRemoved'] += affected
def _merge_command(run, full_result, results):
"""Merge a group of results from write commands into the full result.
"""
for offset, result in results:
affected = result.get("n", 0)
if run.op_type == _INSERT:
full_result["nInserted"] += affected
elif run.op_type == _DELETE:
full_result["nRemoved"] += affected
elif run.op_type == _UPDATE:
upserted = result.get("upserted")
if upserted:
if isinstance(upserted, list):
n_upserted = len(upserted)
for doc in upserted:
doc["index"] = run.index(doc["index"] + offset)
full_result["upserted"].extend(upserted)
else:
n_upserted = 1
index = run.index(offset)
doc = {_UINDEX: index, _UID: upserted}
full_result["upserted"].append(doc)
full_result["nUpserted"] += n_upserted
full_result["nMatched"] += (affected - n_upserted)
else:
full_result["nMatched"] += affected
n_modified = result.get("nModified")
# SERVER-13001 - in a mixed sharded cluster a call to
# update could return nModified (>= 2.6) or not (<= 2.4).
# If any call does not return nModified we can't report
# a valid final count so omit the field completely.
if n_modified is not None and "nModified" in full_result:
full_result["nModified"] += n_modified
else:
full_result.pop("nModified", None)
write_errors = result.get("writeErrors")
if write_errors:
for doc in write_errors:
# Leave the server response intact for APM.
replacement = doc.copy()
idx = doc["index"] + offset
replacement["index"] = run.index(idx)
# Add the failed operation to the error document.
replacement[_UOP] = run.ops[idx]
full_result["writeErrors"].append(replacement)
wc_error = result.get("writeConcernError")
if wc_error:
full_result["writeConcernErrors"].append(wc_error)
class _Bulk(object):
"""The private guts of the bulk write API.
"""
def __init__(self, collection, ordered, bypass_document_validation):
"""Initialize a _Bulk instance.
"""
self.collection = collection.with_options(
codec_options=collection.codec_options._replace(
unicode_decode_error_handler='replace',
document_class=dict))
self.ordered = ordered
self.ops = []
self.name = "%s.%s" % (collection.database.name, collection.name)
self.namespace = collection.database.name + '.$cmd'
self.executed = False
self.bypass_doc_val = bypass_document_validation
self.uses_collation = False
def add_insert(self, document):
"""Add an insert document to the list of ops.
"""
validate_is_document_type("document", document)
# Generate ObjectId client side.
if not (isinstance(document, RawBSONDocument) or '_id' in document):
document['_id'] = ObjectId()
self.ops.append((_INSERT, document))
def add_update(self, selector, update, multi=False, upsert=False,
collation=None):
"""Create an update document and add it to the list of ops.
"""
validate_ok_for_update(update)
cmd = SON([('q', selector), ('u', update),
('multi', multi), ('upsert', upsert)])
collation = validate_collation_or_none(collation)
if collation is not None:
self.uses_collation = True
cmd['collation'] = collation
self.ops.append((_UPDATE, cmd))
def add_replace(self, selector, replacement, upsert=False,
collation=None):
"""Create a replace document and add it to the list of ops.
"""
validate_ok_for_replace(replacement)
cmd = SON([('q', selector), ('u', replacement),
('multi', False), ('upsert', upsert)])
collation = validate_collation_or_none(collation)
if collation is not None:
self.uses_collation = True
cmd['collation'] = collation
self.ops.append((_UPDATE, cmd))
def add_delete(self, selector, limit, collation=None):
"""Create a delete document and add it to the list of ops.
"""
cmd = SON([('q', selector), ('limit', limit)])
collation = validate_collation_or_none(collation)
if collation is not None:
self.uses_collation = True
cmd['collation'] = collation
self.ops.append((_DELETE, cmd))
def gen_ordered(self):
"""Generate batches of operations, batched by type of
operation, in the order **provided**.
"""
run = None
for idx, (op_type, operation) in enumerate(self.ops):
if run is None:
run = _Run(op_type)
elif run.op_type != op_type:
yield run
run = _Run(op_type)
run.add(idx, operation)
yield run
def gen_unordered(self):
"""Generate batches of operations, batched by type of
operation, in arbitrary order.
"""
operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)]
for idx, (op_type, operation) in enumerate(self.ops):
operations[op_type].add(idx, operation)
for run in operations:
if run.ops:
yield run
def execute_command(self, sock_info, generator, write_concern):
"""Execute using write commands.
"""
# nModified is only reported for write commands, not legacy ops.
full_result = {
"writeErrors": [],
"writeConcernErrors": [],
"nInserted": 0,
"nUpserted": 0,
"nMatched": 0,
"nModified": 0,
"nRemoved": 0,
"upserted": [],
}
op_id = _randint()
db_name = self.collection.database.name
listeners = self.collection.database.client._event_listeners
for run in generator:
cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
('ordered', self.ordered)])
if write_concern.document:
cmd['writeConcern'] = write_concern.document
if self.bypass_doc_val and sock_info.max_wire_version >= 4:
cmd['bypassDocumentValidation'] = True
bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id, listeners)
results = _do_batched_write_command(
self.namespace, run.op_type, cmd,
run.ops, True, self.collection.codec_options, bwc)
_merge_command(run, full_result, results)
# We're supposed to continue if errors are
# at the write concern level (e.g. wtimeout)
if self.ordered and full_result['writeErrors']:
break
if full_result["writeErrors"] or full_result["writeConcernErrors"]:
if full_result['writeErrors']:
full_result['writeErrors'].sort(
key=lambda error: error['index'])
raise BulkWriteError(full_result)
return full_result
def execute_no_results(self, sock_info, generator):
"""Execute all operations, returning no results (w=0).
"""
# Cannot have both unacknowledged write and bypass document validation.
if self.bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
coll = self.collection
# If ordered is True we have to send GLE or use write
# commands so we can abort on the first error.
write_concern = WriteConcern(w=int(self.ordered))
op_id = _randint()
for run in generator:
try:
if run.op_type == _INSERT:
coll._insert(
sock_info,
run.ops,
self.ordered,
write_concern=write_concern,
op_id=op_id,
bypass_doc_val=self.bypass_doc_val)
elif run.op_type == _UPDATE:
for operation in run.ops:
doc = operation['u']
check_keys = True
if doc and next(iter(doc)).startswith('$'):
check_keys = False
coll._update(
sock_info,
operation['q'],
doc,
operation['upsert'],
check_keys,
operation['multi'],
write_concern=write_concern,
op_id=op_id,
ordered=self.ordered,
bypass_doc_val=self.bypass_doc_val)
else:
for operation in run.ops:
coll._delete(sock_info,
operation['q'],
not operation['limit'],
write_concern,
op_id,
self.ordered)
except OperationFailure:
if self.ordered:
break
def execute_legacy(self, sock_info, generator, write_concern):
"""Execute using legacy wire protocol ops.
"""
coll = self.collection
full_result = {
"writeErrors": [],
"writeConcernErrors": [],
"nInserted": 0,
"nUpserted": 0,
"nMatched": 0,
"nRemoved": 0,
"upserted": [],
}
op_id = _randint()
stop = False
for run in generator:
for idx, operation in enumerate(run.ops):
try:
# To do per-operation reporting we have to do ops one
# at a time. That means the performance of bulk insert
# will be slower here than calling Collection.insert()
if run.op_type == _INSERT:
coll._insert(sock_info,
operation,
self.ordered,
write_concern=write_concern,
op_id=op_id)
result = {}
elif run.op_type == _UPDATE:
doc = operation['u']
check_keys = True
if doc and next(iter(doc)).startswith('$'):
check_keys = False
result = coll._update(sock_info,
operation['q'],
doc,
operation['upsert'],
check_keys,
operation['multi'],
write_concern=write_concern,
op_id=op_id,
ordered=self.ordered)
else:
result = coll._delete(sock_info,
operation['q'],
not operation['limit'],
write_concern,
op_id,
self.ordered)
_merge_legacy(run, full_result, result, idx)
except DocumentTooLarge as exc:
# MongoDB 2.6 uses error code 2 for "too large".
error = _make_error(
run.index(idx), _BAD_VALUE, str(exc), operation)
full_result['writeErrors'].append(error)
if self.ordered:
stop = True
break
except OperationFailure as exc:
if not exc.details:
# Some error not related to the write operation
# (e.g. kerberos failure). Re-raise immediately.
raise
_merge_legacy(run, full_result, exc.details, idx)
# We're supposed to continue if errors are
# at the write concern level (e.g. wtimeout)
if self.ordered and full_result["writeErrors"]:
stop = True
break
if stop:
break
if full_result["writeErrors"] or full_result['writeConcernErrors']:
if full_result['writeErrors']:
full_result['writeErrors'].sort(
key=lambda error: error['index'])
raise BulkWriteError(full_result)
return full_result
def execute(self, write_concern):
"""Execute operations.
"""
if not self.ops:
raise InvalidOperation('No operations to execute')
if self.executed:
raise InvalidOperation('Bulk operations can '
'only be executed once.')
self.executed = True
write_concern = (WriteConcern(**write_concern) if
write_concern else self.collection.write_concern)
if self.ordered:
generator = self.gen_ordered()
else:
generator = self.gen_unordered()
client = self.collection.database.client
with client._socket_for_writes() as sock_info:
if sock_info.max_wire_version < 5 and self.uses_collation:
raise ConfigurationError(
'Must be connected to MongoDB 3.4+ to use a collation.')
if not write_concern.acknowledged:
if self.uses_collation:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
self.execute_no_results(sock_info, generator)
elif sock_info.max_wire_version > 1:
return self.execute_command(sock_info, generator, write_concern)
else:
return self.execute_legacy(sock_info, generator, write_concern)
class BulkUpsertOperation(object):
"""An interface for adding upsert operations.
"""
__slots__ = ('__selector', '__bulk', '__collation')
def __init__(self, selector, bulk, collation):
self.__selector = selector
self.__bulk = bulk
self.__collation = collation
def update_one(self, update):
"""Update one document matching the selector.
:Parameters:
- `update` (dict): the update operations to apply
"""
self.__bulk.add_update(self.__selector,
update, multi=False, upsert=True,
collation=self.__collation)
def update(self, update):
"""Update all documents matching the selector.
:Parameters:
- `update` (dict): the update operations to apply
"""
self.__bulk.add_update(self.__selector,
update, multi=True, upsert=True,
collation=self.__collation)
def replace_one(self, replacement):
"""Replace one entire document matching the selector criteria.
:Parameters:
- `replacement` (dict): the replacement document
"""
self.__bulk.add_replace(self.__selector, replacement, upsert=True,
collation=self.__collation)
class BulkWriteOperation(object):
"""An interface for adding update or remove operations.
"""
__slots__ = ('__selector', '__bulk', '__collation')
def __init__(self, selector, bulk, collation):
self.__selector = selector
self.__bulk = bulk
self.__collation = collation
def update_one(self, update):
"""Update one document matching the selector criteria.
:Parameters:
- `update` (dict): the update operations to apply
"""
self.__bulk.add_update(self.__selector, update, multi=False,
collation=self.__collation)
def update(self, update):
"""Update all documents matching the selector criteria.
:Parameters:
- `update` (dict): the update operations to apply
"""
self.__bulk.add_update(self.__selector, update, multi=True,
collation=self.__collation)
def replace_one(self, replacement):
"""Replace one entire document matching the selector criteria.
:Parameters:
- `replacement` (dict): the replacement document
"""
self.__bulk.add_replace(self.__selector, replacement,
collation=self.__collation)
def remove_one(self):
"""Remove a single document matching the selector criteria.
"""
self.__bulk.add_delete(self.__selector, _DELETE_ONE,
collation=self.__collation)
def remove(self):
"""Remove all documents matching the selector criteria.
"""
self.__bulk.add_delete(self.__selector, _DELETE_ALL,
collation=self.__collation)
def upsert(self):
"""Specify that all chained update operations should be
upserts.
:Returns:
- A :class:`BulkUpsertOperation` instance, used to add
update operations to this bulk operation.
"""
return BulkUpsertOperation(self.__selector, self.__bulk,
self.__collation)
class BulkOperationBuilder(object):
"""An interface for executing a batch of write operations.
"""
__slots__ = '__bulk'
def __init__(self, collection, ordered=True,
bypass_document_validation=False):
"""Initialize a new BulkOperationBuilder instance.
:Parameters:
- `collection`: A :class:`~pymongo.collection.Collection` instance.
- `ordered` (optional): If ``True`` all operations will be executed
serially, in the order provided, and the entire execution will
abort on the first error. If ``False`` operations will be executed
in arbitrary order (possibly in parallel on the server), reporting
any errors that occurred after attempting all operations. Defaults
to ``True``.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.2
Added bypass_document_validation support
"""
self.__bulk = _Bulk(collection, ordered, bypass_document_validation)
def find(self, selector, collation=None):
"""Specify selection criteria for bulk operations.
:Parameters:
- `selector` (dict): the selection criteria for update
and remove operations.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`. This option is only supported
on MongoDB 3.4 and above.
:Returns:
- A :class:`BulkWriteOperation` instance, used to add
update and remove operations to this bulk operation.
.. versionchanged:: 3.4
Added the `collation` option.
"""
validate_is_mapping("selector", selector)
return BulkWriteOperation(selector, self.__bulk, collation)
def insert(self, document):
"""Insert a single document.
:Parameters:
- `document` (dict): the document to insert
.. seealso:: :ref:`writes-and-ids`
"""
self.__bulk.add_insert(document)
def execute(self, write_concern=None):
"""Execute all provided operations.
:Parameters:
- write_concern (optional): the write concern for this bulk
execution.
"""
if write_concern is not None:
validate_is_mapping("write_concern", write_concern)
return self.__bulk.execute(write_concern)
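# --- Hedged usage sketch (not part of the original module) ---
# How BulkOperationBuilder is typically obtained and driven through the
# collection-level bulk API (since deprecated in favour of bulk_write);
# collection and field names are illustrative only.
#
#     bulk = db.test.initialize_ordered_bulk_op()
#     bulk.insert({"x": 1})
#     bulk.find({"x": 1}).update({"$inc": {"x": 1}})
#     bulk.find({"x": 10}).upsert().replace_one({"x": 10, "y": 2})
#     bulk.find({"x": 2}).remove_one()
#     result = bulk.execute()   # raises BulkWriteError if any write fails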
|
mit
|
thiagolcmelo/segregation
|
segregation/muraki/models.py
|
1
|
2276
|
from django.db import models
# Create your models here.
class Alloy(models.Model):
name = models.CharField("Alloy's name", max_length = 30)
alloy_size = (
('d', 'Double'),
        ('t', 'Ternary')
)
def __str__(self):
return self.name
class Parameter(models.Model):
alloy = models.ForeignKey(Alloy, verbose_name="The related alloy")
a0 = models.FloatField("Lattice parameter")
ac = models.FloatField("Conductions's hydrostatic deformation potential")
av = models.FloatField("Valence's hydrostatic deformation potential")
b = models.FloatField("Deformation potential for tetragonal distorion")
c11 = models.FloatField("Elastic constant")
c12 = models.FloatField("Elastic constant")
me = models.FloatField("Electron effective mass")
mhh = models.FloatField("Heavy-hole effective mass", null = True)
mlh = models.FloatField("Light-hole effective mass", null = True)
eg2 = models.FloatField("Gap energy at 2 K")
eg77 = models.FloatField("Gap energy at 77 K")
eg300 = models.FloatField("Gap energy at 300 K")
def __str__(self):
return self.alloy.name + "'s parameters"
class Interpolation(models.Model):
alloy = models.ForeignKey(Alloy, verbose_name="The related alloy")
a0 = models.CharField("Lattice parameter", max_length = 200)
ac = models.CharField("Conductions's hydrostatic deformation potential", max_length = 200)
av = models.CharField("Valence's hydrostatic deformation potential", max_length = 200)
b = models.CharField("Deformation potential for tetragonal distorion", max_length = 200)
c11 = models.CharField("Elastic constant", max_length = 200)
c12 = models.CharField("Elastic constant", max_length = 200)
me = models.CharField("Electron effective mass", max_length = 200)
mhh = models.CharField("Heavy-hole effective mass", max_length = 200, null = True)
mlh = models.CharField("Light-hole effective mass", max_length = 200, null = True)
eg2 = models.CharField("Gap energy at 2 K", max_length = 200)
eg77 = models.CharField("Gap energy at 77 K", max_length = 200)
eg300 = models.CharField("Gap energy at 300 K", max_length = 200)
def __str__(self):
return self.alloy.name + "'s interpolations"
|
mit
|