repo_name | path | copies | size | content | license
---|---|---|---|---|---|
adyliu/mysql-connector-python | python23/django/introspection.py | 1 | 5112 | # MySQL Connector/Python - MySQL driver written in Python.
import re
from django.db.backends import BaseDatabaseIntrospection
from mysql.connector.constants import FieldType
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) "
r"REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
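# Maps MySQL protocol field type codes to the Django model field names used when
# reverse-engineering tables into models (e.g. by inspectdb).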
data_types_reverse = {
FieldType.BLOB: 'TextField',
FieldType.DECIMAL: 'DecimalField',
FieldType.NEWDECIMAL: 'DecimalField',
FieldType.DATE: 'DateField',
FieldType.DATETIME: 'DateTimeField',
FieldType.DOUBLE: 'FloatField',
FieldType.FLOAT: 'FloatField',
FieldType.INT24: 'IntegerField',
FieldType.LONG: 'IntegerField',
FieldType.LONGLONG: 'BigIntegerField',
FieldType.SHORT: 'IntegerField',
FieldType.STRING: 'CharField',
FieldType.TIMESTAMP: 'DateTimeField',
FieldType.TINY: 'IntegerField',
FieldType.TINY_BLOB: 'TextField',
FieldType.MEDIUM_BLOB: 'TextField',
FieldType.LONG_BLOB: 'TextField',
FieldType.VAR_STRING: 'CharField',
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("SHOW TABLES")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Returns a description of the table, with the DB-API cursor.description
interface."
"""
# varchar length returned by cursor.description is an internal length,
# not visible length (#5725), use information_schema database to fix
# this
cursor.execute("""
SELECT column_name, character_maximum_length
FROM information_schema.columns
WHERE table_name = %s AND table_schema = DATABASE()
AND character_maximum_length IS NOT NULL""", [table_name])
length_map = dict(cursor.fetchall())
cursor.execute("SELECT * FROM {0} LIMIT 1"
"".format(self.connection.ops.quote_name(table_name)))
return [line[:3] + (length_map.get(line[0], line[3]),) + line[4:]
for line in cursor.description]
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(
self.get_table_description(cursor, table_name))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table,
other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
my_field_dict = self._name_to_index(cursor, table_name)
constraints = self.get_key_columns(cursor, table_name)
relations = {}
for my_fieldname, other_table, other_field in constraints:
other_field_index = self._name_to_index(cursor,
other_table)[other_field]
my_field_index = my_field_dict[my_fieldname]
relations[my_field_index] = (other_field_index, other_table)
return relations
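# Illustrative shape of the result (not from the original code): a table whose
# column at index 2 references column 0 of `other_table` yields {2: (0, 'other_table')}.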
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
key_columns = []
cursor.execute("""
SELECT column_name, referenced_table_name, referenced_column_name
FROM information_schema.key_column_usage
WHERE table_name = %s
AND table_schema = DATABASE()
AND referenced_table_name IS NOT NULL
AND referenced_column_name IS NOT NULL""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
cursor.execute("SHOW INDEX FROM {0}"
"".format(self.connection.ops.quote_name(table_name)))
# Do a two-pass search for indexes: on first pass check which indexes
# are multicolumn, on second pass check which single-column indexes
# are present.
rows = list(cursor.fetchall())
multicol_indexes = set()
for row in rows:
if row[3] > 1:
multicol_indexes.add(row[2])
indexes = {}
for row in rows:
if row[2] in multicol_indexes:
continue
indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'),
'unique': not bool(row[1])}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table
"""
for column in self.get_indexes(cursor, table_name).iteritems():
if column[1]['primary_key']:
return column[0]
return None
| gpl-2.0 |
reinout/django | tests/forms_tests/field_tests/test_datetimefield.py | 98 | 5103 | import datetime
from django.forms import DateTimeField, ValidationError
from django.test import SimpleTestCase
class DateTimeFieldTest(SimpleTestCase):
def test_datetimefield_1(self):
f = DateTimeField()
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59))
)
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200))
)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006-10-25 14:30:45.0002'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('2006-10-25 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006-10-25 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('2006-10-25'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/2006 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/2006 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/2006 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/2006'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('10/25/06 14:30:45.000200'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean('10/25/06 14:30:45'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30:00'))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('10/25/06 14:30'))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean('10/25/06'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
f.clean('hello')
with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
f.clean('2006-10-25 4:30 p.m.')
def test_datetimefield_2(self):
f = DateTimeField(input_formats=['%Y %m %d %I:%M %p'])
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(datetime.date(2006, 10, 25)))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(datetime.datetime(2006, 10, 25, 14, 30)))
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59))
)
self.assertEqual(
datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200))
)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean('2006 10 25 2:30 PM'))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
f.clean('2006-10-25 14:30:45')
def test_datetimefield_3(self):
f = DateTimeField(required=False)
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
def test_datetimefield_4(self):
f = DateTimeField()
# Test whitespace stripping behavior (#5714)
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 2006-10-25 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 2006-10-25 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/2006 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30), f.clean(' 10/25/2006 14:30 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/2006 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45), f.clean(' 10/25/06 14:30:45 '))
self.assertEqual(datetime.datetime(2006, 10, 25, 0, 0), f.clean(' 10/25/06 '))
with self.assertRaisesMessage(ValidationError, "'Enter a valid date/time.'"):
f.clean(' ')
def test_datetimefield_5(self):
f = DateTimeField(input_formats=['%Y.%m.%d %H:%M:%S.%f'])
self.assertEqual(datetime.datetime(2006, 10, 25, 14, 30, 45, 200), f.clean('2006.10.25 14:30:45.0002'))
def test_datetimefield_changed(self):
format = '%Y %m %d %I:%M %p'
f = DateTimeField(input_formats=[format])
d = datetime.datetime(2006, 9, 17, 14, 30, 0)
self.assertFalse(f.has_changed(d, '2006 09 17 2:30 PM'))
| bsd-3-clause |
reinout/django | tests/test_client/test_conditional_content_removal.py | 131 | 1958 | import gzip
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.test import SimpleTestCase
from django.test.client import conditional_content_removal
class ConditionalContentTests(SimpleTestCase):
def test_conditional_content_removal(self):
"""
Content is removed from regular and streaming responses with a
status_code of 100-199, 204, 304, or a method of "HEAD".
"""
req = HttpRequest()
# Do nothing for 200 responses.
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'abc')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'abc')
# Strip content for some status codes.
for status_code in (100, 150, 199, 204, 304):
res = HttpResponse('abc', status=status_code)
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'], status=status_code)
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Issue #20472
abc = gzip.compress(b'abc')
res = HttpResponse(abc, status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse([abc], status=304)
res['Content-Encoding'] = 'gzip'
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Strip content for HEAD requests.
req.method = 'HEAD'
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
| bsd-3-clause |
MrHohn/kubernetes | hack/update_owners.py | 15 | 5166 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import csv
import re
import json
import os
import random
import sys
import time
import urllib2
import zlib
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
OWNERS_PATH = os.path.abspath(
os.path.join(BASE_DIR, '..', 'test', 'test_owners.csv'))
GCS_URL_BASE = 'https://storage.googleapis.com/kubernetes-test-history/'
SKIP_MAINTAINERS = {
'aronchick', 'bgrant0607-nocc', 'goltermann', 'sarahnovotny'}
def get_test_history():
url = time.strftime(GCS_URL_BASE + 'logs/%Y-%m-%d.json')
resp = urllib2.urlopen(url)
content = resp.read()
if resp.headers.get('content-encoding') == 'gzip':
content = zlib.decompress(content, 15 | 16)
return json.loads(content)
def normalize(name):
name = re.sub(r'\[.*?\]|\{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
def load_owners(fname):
owners = {}
with open(fname) as f:
for n, (name, owner, random_assignment) in enumerate(csv.reader(f)):
if n == 0:
continue # header
owners[normalize(name)] = (owner, int(random_assignment))
return owners
def write_owners(fname, owners):
with open(fname, 'w') as f:
out = csv.writer(f, lineterminator='\n')
out.writerow(['name', 'owner', 'auto-assigned'])
for name, (owner, random_assignment) in sorted(owners.items()):
out.writerow([name, owner, int(random_assignment)])
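# The CSV written above is a header row followed by one row per test, e.g.
# (illustrative values only):
#     name,owner,auto-assigned
#     some normalized test name,some-github-user,1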
def get_maintainers():
# Github doesn't seem to support team membership listing without a key with
# org admin privileges. Instead, we do it manually:
# Open https://github.com/orgs/kubernetes/teams/kubernetes-maintainers
# Run this in the js console:
# [].slice.call(document.querySelectorAll('.team-member-username a')).map(
# e => e.textContent.trim())
ret = {"a-robinson", "alex-mohr", "amygdala", "andyzheng0831", "apelisse",
"aronchick", "ArtfulCoder", "bgrant0607", "bgrant0607-nocc",
"bprashanth", "brendandburns", "caesarxuchao", "childsb", "cjcullen",
"david-mcmahon", "davidopp", "dchen1107", "deads2k", "derekwaynecarr",
"dubstack", "eparis", "erictune", "fabioy", "fejta", "fgrzadkowski",
"freehan", "ghodss", "girishkalele", "gmarek", "goltermann",
"grodrigues3", "hurf", "ingvagabund", "ixdy",
"jackgr", "janetkuo", "jbeda", "jdef", "jingxu97", "jlowdermilk",
"jsafrane", "jszczepkowski", "justinsb", "kargakis", "karlkfi",
"kelseyhightower", "kevin-wangzefeng", "krousey", "lavalamp",
"liggitt", "luxas", "madhusudancs", "maisem", "mansoorj", "matchstick",
"mikedanese", "mml", "mtaufen", "mwielgus", "ncdc", "nikhiljindal",
"piosz", "pmorie", "pwittrock", "Q-Lee", "quinton-hoole", "Random-Liu",
"rmmh", "roberthbailey", "ronnielai", "saad-ali", "sarahnovotny",
"smarterclayton", "soltysh", "spxtr", "sttts", "swagiaal", "thockin",
"timothysc", "timstclair", "tmrts", "vishh", "vulpecula", "wojtek-t",
"xiang90", "yifan-gu", "yujuhong", "zmerlynn"}
return sorted(ret - SKIP_MAINTAINERS)
def main():
test_history = get_test_history()
test_names = sorted(set(map(normalize, test_history['test_names'])))
owners = load_owners(OWNERS_PATH)
outdated_tests = sorted(set(owners) - set(test_names))
new_tests = sorted(set(test_names) - set(owners))
maintainers = get_maintainers()
print '# OUTDATED TESTS (%d):' % len(outdated_tests)
print '\n'.join(outdated_tests)
print '# NEW TESTS (%d):' % len(new_tests)
print '\n'.join(new_tests)
for name in outdated_tests:
owners.pop(name)
print '# UNEXPECTED MAINTAINERS ',
print '(randomly assigned, but not in kubernetes-maintainers)'
for name, (owner, random_assignment) in sorted(owners.iteritems()):
if random_assignment and owner not in maintainers:
print '%-16s %s' % (owner, name)
owners.pop(name)
print
owner_counts = collections.Counter(
owner for name, (owner, random) in owners.iteritems()
if owner in maintainers)
for test_name in set(test_names) - set(owners):
new_owner, _count = random.choice(owner_counts.most_common()[-4:])
owner_counts[new_owner] += 1
owners[test_name] = (new_owner, True)
print '# Tests per maintainer:'
for owner, count in owner_counts.most_common():
print '%-20s %3d' % (owner, count)
write_owners(OWNERS_PATH + '.new', owners)
if __name__ == '__main__':
main()
| apache-2.0 |
replicatorg/ReplicatorG | skein_engines/skeinforge-50/fabmetheus_utilities/geometry/geometry_tools/path_elements/arc.py | 13 | 1966 | """
Arc vertexes.
From:
http://www.w3.org/TR/SVG/paths.html#PathDataEllipticalArcCommands
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import svg_reader
import math
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getArcPath(elementNode):
"Get the arc path.rx ry x-axis-rotation large-arc-flag sweep-flag"
begin = elementNode.getPreviousVertex(Vector3())
end = evaluate.getVector3FromElementNode(elementNode)
largeArcFlag = evaluate.getEvaluatedBoolean(True, elementNode, 'largeArcFlag')
radius = lineation.getComplexByPrefix(elementNode, 'radius', complex(1.0, 1.0))
sweepFlag = evaluate.getEvaluatedBoolean(True, elementNode, 'sweepFlag')
xAxisRotation = math.radians(evaluate.getEvaluatedFloat(0.0, elementNode, 'xAxisRotation'))
arcComplexes = svg_reader.getArcComplexes(begin.dropAxis(), end.dropAxis(), largeArcFlag, radius, sweepFlag, xAxisRotation)
path = []
if len(arcComplexes) < 1:
return []
incrementZ = (end.z - begin.z) / float(len(arcComplexes))
z = begin.z
for pointIndex in xrange(len(arcComplexes)):
pointComplex = arcComplexes[pointIndex]
z += incrementZ
path.append(Vector3(pointComplex.real, pointComplex.imag, z))
if len(path) > 0:
path[-1] = end
return path
def processElementNode(elementNode):
"Process the xml element."
elementNode.parentNode.xmlObject.vertexes += getArcPath(elementNode)
| gpl-2.0 |
Fermi-Dirac/mathtests | pyggel/particle.py | 1 | 12345 | """
pyggel.particle
This library (PYGGEL) is licensed under the LGPL by Matthew Roe and PYGGEL contributors.
The particle module contains classes for creating and rendering particle effects.
A simple fire effect is included.
"""
from .include import *
from . import data, image, misc
import random
import numpy
from .misc import randfloat
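# Typical usage, as a sketch only; the `scene` object and its add_3d method are
# assumed from the wider PYGGEL API and are not defined in this module:
#     fire = Emitter3D(Fire3D, pos=(0, 0, 0))
#     scene.add_3d(fire)
# Each frame the emitter's render() spawns particles via its behavior class and
# updates/draws every live particle.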
class Particle3D(object):
"""A simple 3d particle."""
def __init__(self, parent, behavior):
"""Create the particle.
parent must be the emitter class creating the particle
behavior must be the behavior class that will handle how the particle behaves"""
self.parent = parent
self.parent.particles.append(self)
self.extra_data = {}
self.behavior = behavior
self.image = self.behavior.image.copy()
self.behavior.register_particle(self)
self.age = 0
def update(self):
"""Update the particle."""
self.behavior.particle_update(self)
def render(self, camera):
"""Render the particle.
camera must be None or the camera object the scene is using"""
self.update()
self.image.render(camera)
def kill(self):
"""Destroy the particle."""
self.parent.particles.remove(self)
class Emitter3D(object):
"""A simple Particle3D emitter."""
def __init__(self, behavior, pos=(0,0,0)):
"""Create the emitter.
behavior must be the behavior class (not instance) that will control how the emitter and particles will behave
pos must be a three-part tuple of the position of the emitter"""
self.pos = pos
self.behavior = behavior(self)
self.particles = []
self.particle_type = Particle3D
self.visible = True
self.pickable = False
self.outline = False
self.outline_size = 4
self.outline_color=(1,0,0)
def get_dimensions(self):
"""Return the maximum dimensions (width/height/depth) of the emitter and particles."""
return self.behavior.get_dimensions()
def get_pos(self):
"""Return the emitter position."""
return self.pos
def get_scale(self):
"""Return the scale of the object."""
return 1,1,1
def update(self):
"""Update the emitter."""
self.behavior.emitter_update()
def render(self, camera):
"""Render and update all particles.
camera must be None or the camera the scene is using
self.update()
for i in self.particles:
i.render(camera)
class Behavior3D(object):
"""A simple behavior class to control an emitter and particles."""
def __init__(self, emitter):
"""Create the emitter.
emitter must be the emitter object that is using this behavior.
NOTE: this should never be called, the emitter object will do that!"""
self.emitter = emitter
self.particle_lifespan = 1
self.image = image.create_empty_image3d((8,8))
self.image.pos = self.emitter.pos
def get_dimensions(self):
"""Calculate and return the maximum dimensions (width/height/depth) of the emitter and particles."""
#calculate max width, height and depth of particles...
return 1, 1, 1
def emitter_update(self):
"""Update the emitter."""
pass
def particle_update(self, part):
"""Update a particle."""
part.age += 1
if part.age >= self.particle_lifespan:
part.kill()
def register_particle(self, part):
"""Register a particle."""
pass
class Fire3D(Behavior3D):
"""A simple fire behavior for an Emitter3D."""
def __init__(self, emitter):
Behavior3D.__init__(self, emitter)
self.image = image.create_empty_image3d((8,8), (1,.5,0,1))
self.image.scale = .25
self.image.pos = self.emitter.pos
self.particle_lifespan = 20
__init__.__doc__ = Behavior3D.__init__.__doc__
def get_dimensions(self):
return 2, 6, 2 #max/abs(min) directions(x,y,z) * particle_lifespan
get_dimensions.__doc__ = Behavior3D.get_dimensions.__doc__
def emitter_update(self):
for i in range(5):
self.emitter.particle_type(self.emitter, self)
emitter_update.__doc__ = Behavior3D.emitter_update.__doc__
def register_particle(self, part):
dx = randfloat(-.1, .1)
dy = randfloat(.15, .3)
dz = randfloat(-.1, .1)
rot = random.randint(-25, 25)
part.extra_data["dir"] = (dx, dy, dz)
part.extra_data["rot"] = rot
x, y, z = self.emitter.pos
part.image.pos = x+dx*randfloat(1, 2), y, z+dz*randfloat(1, 2)
register_particle.__doc__ = Behavior3D.register_particle.__doc__
def particle_update(self, part):
Behavior3D.particle_update(self, part)
x, y, z = part.image.pos
a, b, c = part.extra_data["dir"]
x += a
y += b
z += c
b -= .025
part.extra_data["dir"] = a, b, c
part.image.pos = x, y, z
x, y, z = part.image.rotation
z -= part.extra_data["rot"]
part.image.rotation = x, y, z
r, g, b, a = part.image.colorize
a -= .075
part.image.colorize = r, g, b, a
part.image.scale -= .025
particle_update.__doc__ = Behavior3D.particle_update.__doc__
class ParticlePoint(object):
"""A more complex particle that can be used in a VertexArray powered emitter."""
def __init__(self, parent, behavior):
"""Create the particle.
parent must be the emitter class creating the particle
behavior must be the behavior class that will handle how the particle behaves"""
self.parent = parent
self.pos = self.parent.pos
self.colorize = (1,1,1,1)
self.index = self.parent.add_particle(self)
self.extra_data = {}
self.behavior = behavior
self.behavior.register_particle(self)
self.age = 0
def get_vertex_index(self):
"""Return our unique index from our emitter's vertex array."""
return self.parent.particles.index(self)
def kill(self):
"""Kill the particle."""
self.parent.remove_particle(self)
def update(self):
"""Update the particle."""
self.behavior.particle_update(self)
x, y, z = self.pos
r, g, b, a = self.colorize
self.parent.vertex_array.verts[self.index][0] = x
self.parent.vertex_array.verts[self.index][1] = y
self.parent.vertex_array.verts[self.index][2] = z
self.parent.vertex_array.colors[self.index][0] = r
self.parent.vertex_array.colors[self.index][1] = g
self.parent.vertex_array.colors[self.index][2] = b
self.parent.vertex_array.colors[self.index][3] = a
class EmitterPoint(object):
"""A more complex particle emitter, that stores all particles in a vertex array."""
def __init__(self, behavior, pos=(0,0,0)):
"""Create the emitter.
behavior must be the behavior class (not instance) that will control how the emitter and particles will behave
pos must be a three-part tuple of the position of the emitter"""
self.pos = pos
self.behavior = behavior(self)
self.particles = numpy.empty(self.behavior.max_particles, dtype=object)
self.empty_spaces = []
self.last_number = 0
self.vertex_array = data.VertexArray(GL_POINTS, self.behavior.max_particles)
self.visible = True
self.pickable = False
self.outline = False
self.outline_size = 4
self.outline_color=(1,0,0)
self.particle_type = ParticlePoint
def get_dimensions(self):
"""Return the maximum dimensions (width/height/depth) of the emitter and particles."""
return self.behavior.get_dimensions()
def get_pos(self):
"""Return the emitter position."""
return self.pos
def get_scale(self):
"""Return the scale of the object."""
return 1,1,1
def add_particle(self, part):
"""Add the particle to the vertex array and assign it it's own index."""
if self.empty_spaces:
x = self.empty_spaces.pop(0)
self.particles[x] = part
return x
else:
self.particles[self.last_number] = part
self.last_number += 1
return self.last_number - 1
def remove_particle(self, part):
"""Remove the particle."""
if part.index+1 == self.last_number:
self.last_number -= 1
else:
self.empty_spaces.append(part.index)
self.particles[part.index] = None
def update(self):
"""Update the emitter."""
self.behavior.emitter_update()
def render(self, camera):
"""Render and update all particles.
camera must be None or the camera the scene is using
self.update()
glPointSize(self.behavior.point_size)
for i in self.particles:
if i:
i.update()
self.vertex_array.render()
class BehaviorPoint(object):
"""Almost the same as Behavior3D, except also has a max_particles attribute for the size of the vertex array."""
def __init__(self, emitter):
"""Create the emitter.
emitter must be the emitter object that is using this behavior.
NOTE: this should never be called, the emitter object will do that!"""
self.emitter = emitter
self.particle_lifespan = 1
self.max_particles = 2
def get_dimensions(self):
"""Calculate and return the maximum dimensions (width/height/depth) of the emitter and particles."""
return 1,1,1
def emitter_update(self):
"""Update the emitter."""
pass
def particle_update(self, part):
"""Update a particle."""
part.age += 1
if part.age >= self.particle_lifespan:
part.kill()
def register_particle(self, part):
"""Register a particle for us to control."""
pass
class FirePoint(BehaviorPoint):
"""A more complex fire behavior for an EmitterPoint."""
def __init__(self, emitter):
BehaviorPoint.__init__(self, emitter)
self.particle_lifespan = 20
self.point_size = 15
self.max_particles = 105 #self.particle_lifespan * emit rate (5) + 1 cycle of give space - as the emitter runs before the particles die...
__init__.__doc__ = BehaviorPoint.__init__.__doc__
def get_dimensions(self):
return 2, 6, 2 #max/abs(min) directions (x,y,z) of particles * particle_lifespan
get_dimensions.__doc__ = BehaviorPoint.get_dimensions.__doc__
def emitter_update(self):
for i in range(5):
self.emitter.particle_type(self.emitter, self)
emitter_update.__doc__ = BehaviorPoint.emitter_update.__doc__
def register_particle(self, part):
dx = randfloat(-.1, .1)
dy = randfloat(.15, .3)
dz = randfloat(-.1, .1)
part.extra_data["dir"] = (dx, dy, dz)
part.colorize = (1, 0, 0, 1)
x, y, z = self.emitter.pos
part.pos = x + dx * randfloat(1, 1.2), y, z + dz * randfloat(1, 1.2)
part.colorize = random.choice(((1, 0, 0, 1),
(1, .25, 0, 1),
(1, 1, 0, 1)))
register_particle.__doc__ = BehaviorPoint.register_particle.__doc__
def particle_update(self, part):
BehaviorPoint.particle_update(self, part)
r, g, b, a = part.colorize
g += .01
a -= 1.0/20
part.colorize = r, g, b, a
x, y, z = part.pos
a, b, c = part.extra_data["dir"]
x += a
y += b
z += c
b -= .01
part.extra_data["dir"] = a, b, c
part.pos = x, y, z
particle_update.__doc__ = BehaviorPoint.particle_update.__doc__
| mit |
openprivacy/.emacs.d | elpy/rpc-venv/lib/python3.8/site-packages/pip/_internal/pyproject.py | 13 | 7400 | from __future__ import absolute_import
import io
import os
import sys
from collections import namedtuple
from pip._vendor import six, toml
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._internal.exceptions import InstallationError
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Optional, List
def _is_list_of_str(obj):
# type: (Any) -> bool
return (
isinstance(obj, list) and
all(isinstance(item, six.string_types) for item in obj)
)
def make_pyproject_path(unpacked_source_directory):
# type: (str) -> str
path = os.path.join(unpacked_source_directory, 'pyproject.toml')
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(path, six.text_type):
path = path.encode(sys.getfilesystemencoding())
return path
BuildSystemDetails = namedtuple('BuildSystemDetails', [
'requires', 'backend', 'check', 'backend_path'
])
def load_pyproject_toml(
use_pep517, # type: Optional[bool]
pyproject_toml, # type: str
setup_py, # type: str
req_name # type: str
):
# type: (...) -> Optional[BuildSystemDetails]
"""Load the pyproject.toml file.
Parameters:
use_pep517 - Has the user requested PEP 517 processing? None
means the user hasn't explicitly specified.
pyproject_toml - Location of the project's pyproject.toml file
setup_py - Location of the project's setup.py file
req_name - The name of the requirement we're processing (for
error reporting)
Returns:
None if we should use the legacy code path, otherwise a tuple
(
requirements from pyproject.toml,
name of PEP 517 backend,
requirements we should check are installed after setting
up the build environment
directory paths to import the backend from (backend-path),
relative to the project root.
)
"""
has_pyproject = os.path.isfile(pyproject_toml)
has_setup = os.path.isfile(setup_py)
if has_pyproject:
with io.open(pyproject_toml, encoding="utf-8") as f:
pp_toml = toml.load(f)
build_system = pp_toml.get("build-system")
else:
build_system = None
# The following cases must use PEP 517
# We check for use_pep517 being non-None and falsey because that means
# the user explicitly requested --no-use-pep517. The value 0 as
# opposed to False can occur when the value is provided via an
# environment variable or config file option (due to the quirk of
# strtobool() returning an integer in pip's configuration code).
if has_pyproject and not has_setup:
if use_pep517 is not None and not use_pep517:
raise InstallationError(
"Disabling PEP 517 processing is invalid: "
"project does not have a setup.py"
)
use_pep517 = True
elif build_system and "build-backend" in build_system:
if use_pep517 is not None and not use_pep517:
raise InstallationError(
"Disabling PEP 517 processing is invalid: "
"project specifies a build backend of {} "
"in pyproject.toml".format(
build_system["build-backend"]
)
)
use_pep517 = True
# If we haven't worked out whether to use PEP 517 yet,
# and the user hasn't explicitly stated a preference,
# we do so if the project has a pyproject.toml file.
elif use_pep517 is None:
use_pep517 = has_pyproject
# At this point, we know whether we're going to use PEP 517.
assert use_pep517 is not None
# If we're using the legacy code path, there is nothing further
# for us to do here.
if not use_pep517:
return None
if build_system is None:
# Either the user has a pyproject.toml with no build-system
# section, or the user has no pyproject.toml, but has opted in
# explicitly via --use-pep517.
# In the absence of any explicit backend specification, we
# assume the setuptools backend that most closely emulates the
# traditional direct setup.py execution, and require wheel and
# a version of setuptools that supports that backend.
build_system = {
"requires": ["setuptools>=40.8.0", "wheel"],
"build-backend": "setuptools.build_meta:__legacy__",
}
# If we're using PEP 517, we have build system information (either
# from pyproject.toml, or defaulted by the code above).
# Note that at this point, we do not know if the user has actually
# specified a backend, though.
assert build_system is not None
# Ensure that the build-system section in pyproject.toml conforms
# to PEP 518.
error_template = (
"{package} has a pyproject.toml file that does not comply "
"with PEP 518: {reason}"
)
# Specifying the build-system table but not the requires key is invalid
if "requires" not in build_system:
raise InstallationError(
error_template.format(package=req_name, reason=(
"it has a 'build-system' table but not "
"'build-system.requires' which is mandatory in the table"
))
)
# Error out if requires is not a list of strings
requires = build_system["requires"]
if not _is_list_of_str(requires):
raise InstallationError(error_template.format(
package=req_name,
reason="'build-system.requires' is not a list of strings.",
))
# Each requirement must be valid as per PEP 508
for requirement in requires:
try:
Requirement(requirement)
except InvalidRequirement:
raise InstallationError(
error_template.format(
package=req_name,
reason=(
"'build-system.requires' contains an invalid "
"requirement: {!r}".format(requirement)
),
)
)
backend = build_system.get("build-backend")
backend_path = build_system.get("backend-path", [])
check = [] # type: List[str]
if backend is None:
# If the user didn't specify a backend, we assume they want to use
# the setuptools backend. But we can't be sure they have included
# a version of setuptools which supplies the backend, or wheel
# (which is needed by the backend) in their requirements. So we
# make a note to check that those requirements are present once
# we have set up the environment.
# This is quite a lot of work to check for a very specific case. But
# the problem is, that case is potentially quite common - projects that
# adopted PEP 518 early for the ability to specify requirements to
# execute setup.py, but never considered needing to mention the build
# tools themselves. The original PEP 518 code had a similar check (but
# implemented in a different way).
backend = "setuptools.build_meta:__legacy__"
check = ["setuptools>=40.8.0", "wheel"]
return BuildSystemDetails(requires, backend, check, backend_path)
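# Illustrative call (hypothetical paths, not part of pip itself):
#     details = load_pyproject_toml(None, '/src/pkg/pyproject.toml', '/src/pkg/setup.py', 'pkg')
#     if details is not None:
#         requires, backend, check, backend_path = details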
| gpl-2.0 |
ChristosChristofidis/bokeh | examples/glyphs/iris_splom.py | 43 | 2936 | from __future__ import print_function
from math import pi
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Circle, Text
from bokeh.models import (
BasicTicker, ColumnDataSource, Grid, GridPlot, LinearAxis,
DataRange1d, PanTool, Plot, WheelZoomTool
)
from bokeh.resources import INLINE
from bokeh.sampledata.iris import flowers
colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}
flowers['color'] = flowers['species'].map(lambda x: colormap[x])
source = ColumnDataSource(
data=dict(
petal_length=flowers['petal_length'],
petal_width=flowers['petal_width'],
sepal_length=flowers['sepal_length'],
sepal_width=flowers['sepal_width'],
color=flowers['color']
)
)
text_source = ColumnDataSource(
data=dict(xcenter=[125], ycenter=[135])
)
xdr = DataRange1d()
ydr = DataRange1d()
def make_plot(xname, yname, xax=False, yax=False, text=None):
plot = Plot(
x_range=xdr, y_range=ydr, background_fill="#efe8e2",
border_fill='white', title="", min_border=2, h_symmetry=False, v_symmetry=False,
plot_width=250, plot_height=250)
circle = Circle(x=xname, y=yname, fill_color="color", fill_alpha=0.2, size=4, line_color="color")
r = plot.add_glyph(source, circle)
xdr.renderers.append(r)
ydr.renderers.append(r)
xticker = BasicTicker()
if xax:
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
xticker = xaxis.ticker
plot.add_layout(Grid(dimension=0, ticker=xticker))
yticker = BasicTicker()
if yax:
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
yticker = yaxis.ticker
plot.add_layout(Grid(dimension=1, ticker=yticker))
plot.add_tools(PanTool(), WheelZoomTool())
if text:
text = " ".join(text.split('_'))
text = Text(
x={'field':'xcenter', 'units':'screen'},
y={'field':'ycenter', 'units':'screen'},
text=[text], angle=pi/4, text_font_style="bold", text_baseline="top",
text_color="#ffaaaa", text_alpha=0.7, text_align="center", text_font_size="28pt"
)
plot.add_glyph(text_source, text)
return plot
xattrs = ["petal_length", "petal_width", "sepal_width", "sepal_length"]
yattrs = list(reversed(xattrs))
plots = []
for y in yattrs:
row = []
for x in xattrs:
xax = (y == yattrs[-1])
yax = (x == xattrs[0])
text = x if (x==y) else None
plot = make_plot(x, y, xax, yax, text)
row.append(plot)
plots.append(row)
grid = GridPlot(children=plots, title="iris_splom")
doc = Document()
doc.add(grid)
if __name__ == "__main__":
filename = "iris_splom.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Iris Data SPLOM"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/aio/operations/_express_route_circuit_peerings_operations.py | 1 | 21703 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitPeeringsOperations:
"""ExpressRouteCircuitPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> "_models.ExpressRouteCircuitPeering":
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs
) -> "_models.ExpressRouteCircuitPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCircuitPeering",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteCircuitPeering"]:
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update express route circuit
peering operation.
:type peering_parameters: ~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs
) -> AsyncIterable["_models.ExpressRouteCircuitPeeringListResult"]:
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitPeeringListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'} # type: ignore
| mit |
xq262144/hue | desktop/core/ext-py/lxml-3.3.6/src/lxml/tests/test_nsclasses.py | 19 | 7242 | # -*- coding: utf-8 -*-
"""
Test cases related to namespace implementation classes and the
namespace registry mechanism
"""
import unittest, sys, os.path
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, HelperTestCase, _bytes
from common_imports import doctest, make_doctest
class ETreeNamespaceClassesTestCase(HelperTestCase):
class default_class(etree.ElementBase):
pass
class maeh_class(etree.ElementBase):
def maeh(self):
return 'maeh'
class bluff_class(etree.ElementBase):
def bluff(self):
return 'bluff'
def setUp(self):
super(ETreeNamespaceClassesTestCase, self).setUp()
lookup = etree.ElementNamespaceClassLookup()
self.Namespace = lookup.get_namespace
parser = etree.XMLParser()
parser.set_element_class_lookup(lookup)
etree.set_default_parser(parser)
def tearDown(self):
etree.set_default_parser()
del self.Namespace
super(ETreeNamespaceClassesTestCase, self).tearDown()
def test_registry(self):
ns = self.Namespace('ns01')
ns['maeh'] = self.maeh_class
self.Namespace('ns01').clear()
self.Namespace('ns02').update({'maeh' : self.maeh_class})
self.Namespace('ns03').update({'bluff' : self.bluff_class}.items())
self.Namespace('ns02').clear()
self.Namespace('ns03').clear()
def test_ns_classes(self):
bluff_dict = {'bluff' : self.bluff_class}
maeh_dict = {'maeh' : self.maeh_class}
self.Namespace('ns10').update(bluff_dict)
tree = self.parse(_bytes('<bluff xmlns="ns10"><ns11:maeh xmlns:ns11="ns11"/></bluff>'))
el = tree.getroot()
self.assertTrue(isinstance(el, etree.ElementBase))
self.assertTrue(hasattr(el, 'bluff'))
self.assertFalse(hasattr(el[0], 'maeh'))
self.assertFalse(hasattr(el[0], 'bluff'))
self.assertEqual(el.bluff(), 'bluff')
del el
self.Namespace('ns11').update(maeh_dict)
el = tree.getroot()
self.assertTrue(hasattr(el, 'bluff'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.assertEqual(el.bluff(), 'bluff')
self.assertEqual(el[0].maeh(), 'maeh')
del el
self.Namespace('ns10').clear()
tree = self.parse(_bytes('<bluff xmlns="ns10"><ns11:maeh xmlns:ns11="ns11"/></bluff>'))
el = tree.getroot()
self.assertFalse(hasattr(el, 'bluff'))
self.assertFalse(hasattr(el, 'maeh'))
self.assertFalse(hasattr(el[0], 'bluff'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.Namespace('ns11').clear()
def test_default_tagname(self):
bluff_dict = {
None : self.bluff_class,
'maeh' : self.maeh_class
}
ns = self.Namespace("uri:nsDefClass")
ns.update(bluff_dict)
tree = self.parse(_bytes('''
<test xmlns="bla" xmlns:ns1="uri:nsDefClass" xmlns:ns2="uri:nsDefClass">
<ns2:el1/><ns1:el2/><ns1:maeh/><ns2:maeh/><maeh/>
</test>
'''))
el = tree.getroot()
self.assertFalse(isinstance(el, etree.ElementBase))
for child in el[:-1]:
self.assertTrue(isinstance(child, etree.ElementBase), child.tag)
self.assertFalse(isinstance(el[-1], etree.ElementBase))
self.assertTrue(hasattr(el[0], 'bluff'))
self.assertTrue(hasattr(el[1], 'bluff'))
self.assertTrue(hasattr(el[2], 'maeh'))
self.assertTrue(hasattr(el[3], 'maeh'))
self.assertFalse(hasattr(el[4], 'maeh'))
del el
ns.clear()
def test_create_element(self):
bluff_dict = {'bluff' : self.bluff_class}
self.Namespace('ns20').update(bluff_dict)
maeh_dict = {'maeh' : self.maeh_class}
self.Namespace('ns21').update(maeh_dict)
el = etree.Element("{ns20}bluff")
self.assertTrue(hasattr(el, 'bluff'))
child = etree.SubElement(el, "{ns21}maeh")
self.assertTrue(hasattr(child, 'maeh'))
child = etree.SubElement(el, "{ns20}bluff")
self.assertTrue(hasattr(child, 'bluff'))
child = etree.SubElement(el, "{ns21}bluff")
self.assertFalse(hasattr(child, 'bluff'))
self.assertFalse(hasattr(child, 'maeh'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.assertTrue(hasattr(el[1], 'bluff'))
self.assertFalse(hasattr(el[2], 'bluff'))
self.assertFalse(hasattr(el[2], 'maeh'))
self.assertEqual(el.bluff(), 'bluff')
self.assertEqual(el[0].maeh(), 'maeh')
self.assertEqual(el[1].bluff(), 'bluff')
self.Namespace('ns20').clear()
self.Namespace('ns21').clear()
def test_create_element_default(self):
bluff_dict = {None : self.bluff_class}
self.Namespace('ns30').update(bluff_dict)
maeh_dict = {'maeh' : self.maeh_class}
self.Namespace(None).update(maeh_dict)
el = etree.Element("{ns30}bluff")
etree.SubElement(el, "maeh")
self.assertTrue(hasattr(el, 'bluff'))
self.assertTrue(hasattr(el[0], 'maeh'))
self.assertEqual(el.bluff(), 'bluff')
self.assertEqual(el[0].maeh(), 'maeh')
self.Namespace(None).clear()
self.Namespace('ns30').clear()
def test_element_creation(self):
default, bluff, maeh = (
self.default_class, self.bluff_class, self.maeh_class)
class honk(etree.ElementBase):
TAG = 'HONK'
NAMESPACE = 'http://a.b/c'
el = default(
"test",
"text",
bluff(honk, "TaIL", maeh),
maeh("TeXT", bluff, honk(), "TAiL"),
"Tail")
self.assertEqual('default_class', el.tag)
self.assertEqual('testtext', el.text)
self.assertEqual(None, el.tail)
self.assertEqual(2, len(el))
self.assertEqual(7, len(list(el.iter())))
self.assertEqual('bluff_class', el[0].tag)
self.assertEqual('TaIL', el[0][0].tail)
self.assertEqual('TaIL', ''.join(el[0].itertext()))
self.assertEqual('{http://a.b/c}HONK',
el[0][0].tag)
self.assertEqual('maeh_class',
el[0][1].tag)
self.assertEqual('maeh_class', el[1].tag)
self.assertEqual('TeXT', el[1].text)
self.assertEqual('bluff_class', el[1][0].tag)
self.assertEqual('{http://a.b/c}HONK', el[1][1].tag)
self.assertEqual('TAiL', el[1][1].tail)
self.assertEqual('TeXTTAiL',
''.join(el[1].itertext()))
self.assertEqual('Tail', el[1].tail)
self.assertEqual('TAiL', el[1][1].tail)
self.assertEqual('bluff_class', el[1][0].tag)
self.assertEqual('{http://a.b/c}HONK', el[1][1].tag)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeNamespaceClassesTestCase)])
suite.addTests(
[make_doctest('../../../doc/element_classes.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| apache-2.0 |
BORETS24/Zenfone-2-500CL | linux/kernel/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
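# Helpers converting between trace time offsets and screen pixels; at zoom == 1.0 one pixel covers 1000 time units.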
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
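# Map a y pixel coordinate to the index of the rectangle row it falls in, or -1 if it lands in the spacing between rows or outside the drawn area.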
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
pfmoore/pip | docs/pip_sphinxext.py | 4 | 10370 | """pip sphinx extensions"""
import optparse
import pathlib
import re
import sys
from textwrap import dedent
from typing import Dict, Iterable, Iterator, List, Optional, Union
from docutils import nodes, statemachine
from docutils.parsers import rst
from docutils.statemachine import StringList, ViewList
from sphinx.application import Sphinx
from pip._internal.cli import cmdoptions
from pip._internal.commands import commands_dict, create_command
from pip._internal.req.req_file import SUPPORTED_OPTIONS
class PipNewsInclude(rst.Directive):
required_arguments = 1
def _is_version_section_title_underline(
self, prev: Optional[str], curr: str
) -> bool:
"""Find a ==== line that marks the version section title."""
if prev is None:
return False
if re.match(r"^=+$", curr) is None:
return False
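# The underline must be at least as long as the title line above it.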
if len(curr) < len(prev):
return False
return True
def _iter_lines_with_refs(self, lines: Iterable[str]) -> Iterator[str]:
"""Transform the input lines to add a ref before each section title.
This is done by looking one line ahead to locate a title's underline,
and adding a ref before the title text.
Dots in the version are converted into dashes, and a ``v`` is prefixed.
This makes Sphinx use them as HTML ``id`` verbatim without generating
auto numbering (which would make the anchors unstable).
"""
prev = None
for line in lines:
# Transform the previous line to include an explicit ref.
if self._is_version_section_title_underline(prev, line):
assert prev is not None
vref = prev.split(None, 1)[0].replace(".", "-")
yield f".. _`v{vref}`:"
yield "" # Empty line between ref and the title.
if prev is not None:
yield prev
prev = line
if prev is not None:
yield prev
def run(self) -> List[nodes.Node]:
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1,
)
path = (
pathlib.Path(source).resolve().parent.joinpath(self.arguments[0]).resolve()
)
include_lines = statemachine.string2lines(
path.read_text(encoding="utf-8"),
self.state.document.settings.tab_width,
convert_whitespace=True,
)
include_lines = list(self._iter_lines_with_refs(include_lines))
self.state_machine.insert_input(include_lines, str(path))
return []
class PipCommandUsage(rst.Directive):
required_arguments = 1
optional_arguments = 3
def run(self) -> List[nodes.Node]:
cmd = create_command(self.arguments[0])
cmd_prefix = "python -m pip"
if len(self.arguments) > 1:
cmd_prefix = " ".join(self.arguments[1:])
cmd_prefix = cmd_prefix.strip('"')
cmd_prefix = cmd_prefix.strip("'")
usage = dedent(cmd.usage.replace("%prog", f"{cmd_prefix} {cmd.name}")).strip()
node = nodes.literal_block(usage, usage)
return [node]
class PipCommandDescription(rst.Directive):
required_arguments = 1
def run(self) -> List[nodes.Node]:
node = nodes.paragraph()
node.document = self.state.document
desc = ViewList()
cmd = create_command(self.arguments[0])
assert cmd.__doc__ is not None
description = dedent(cmd.__doc__)
for line in description.split("\n"):
desc.append(line, "")
self.state.nested_parse(desc, 0, node)
return [node]
class PipOptions(rst.Directive):
def _format_option(
self, option: optparse.Option, cmd_name: Optional[str] = None
) -> List[str]:
bookmark_line = (
f".. _`{cmd_name}_{option._long_opts[0]}`:"
if cmd_name
else f".. _`{option._long_opts[0]}`:"
)
line = ".. option:: "
if option._short_opts:
line += option._short_opts[0]
if option._short_opts and option._long_opts:
line += ", " + option._long_opts[0]
elif option._long_opts:
line += option._long_opts[0]
if option.takes_value():
metavar = option.metavar or option.dest
assert metavar is not None
line += f" <{metavar.lower()}>"
# fix defaults
assert option.help is not None
# https://github.com/python/typeshed/pull/5080
opt_help = option.help.replace("%default", str(option.default)) # type: ignore
# fix paths with sys.prefix
opt_help = opt_help.replace(sys.prefix, "<sys.prefix>")
return [bookmark_line, "", line, "", " " + opt_help, ""]
def _format_options(
self, options: Iterable[optparse.Option], cmd_name: Optional[str] = None
) -> None:
for option in options:
if option.help == optparse.SUPPRESS_HELP:
continue
for line in self._format_option(option, cmd_name):
self.view_list.append(line, "")
def run(self) -> List[nodes.Node]:
node = nodes.paragraph()
node.document = self.state.document
self.view_list = ViewList()
self.process_options()
self.state.nested_parse(self.view_list, 0, node)
return [node]
class PipGeneralOptions(PipOptions):
def process_options(self) -> None:
self._format_options([o() for o in cmdoptions.general_group["options"]])
class PipIndexOptions(PipOptions):
required_arguments = 1
def process_options(self) -> None:
cmd_name = self.arguments[0]
self._format_options(
[o() for o in cmdoptions.index_group["options"]],
cmd_name=cmd_name,
)
class PipCommandOptions(PipOptions):
required_arguments = 1
def process_options(self) -> None:
cmd = create_command(self.arguments[0])
self._format_options(
cmd.parser.option_groups[0].option_list,
cmd_name=cmd.name,
)
class PipReqFileOptionsReference(PipOptions):
def determine_opt_prefix(self, opt_name: str) -> str:
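# Find which pip command defines the given long option so the generated ref can point at that command's section.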
for command in commands_dict:
cmd = create_command(command)
if cmd.cmd_opts.has_option(opt_name):
return command
raise KeyError(f"Could not identify prefix of opt {opt_name}")
def process_options(self) -> None:
for option in SUPPORTED_OPTIONS:
if getattr(option, "deprecated", False):
continue
opt = option()
opt_name = opt._long_opts[0]
if opt._short_opts:
short_opt_name = "{}, ".format(opt._short_opts[0])
else:
short_opt_name = ""
if option in cmdoptions.general_group["options"]:
prefix = ""
else:
prefix = "{}_".format(self.determine_opt_prefix(opt_name))
self.view_list.append(
"* :ref:`{short}{long}<{prefix}{opt_name}>`".format(
short=short_opt_name,
long=opt_name,
prefix=prefix,
opt_name=opt_name,
),
"\n",
)
class PipCLIDirective(rst.Directive):
"""
- Only works when used in a MyST document.
- Requires sphinx-inline-tabs' tab directive.
"""
has_content = True
optional_arguments = 1
def run(self) -> List[nodes.Node]:
node = nodes.paragraph()
node.document = self.state.document
os_variants = {
"Linux": {
"highlighter": "console",
"executable": "python",
"prompt": "$",
},
"MacOS": {
"highlighter": "console",
"executable": "python",
"prompt": "$",
},
"Windows": {
"highlighter": "doscon",
"executable": "py",
"prompt": "C:>",
},
}
if self.arguments:
assert self.arguments == ["in-a-venv"]
in_virtual_environment = True
else:
in_virtual_environment = False
lines = []
# Create a tab for each OS
for os, variant in os_variants.items():
# Unpack the values
prompt = variant["prompt"]
highlighter = variant["highlighter"]
if in_virtual_environment:
executable = "python"
pip_spelling = "pip"
else:
executable = variant["executable"]
pip_spelling = f"{executable} -m pip"
# Substitute the various "prompts" into the correct variants
substitution_pipeline = [
(
r"(^|(?<=\n))\$ python",
f"{prompt} {executable}",
),
(
r"(^|(?<=\n))\$ pip",
f"{prompt} {pip_spelling}",
),
]
content = self.block_text
for pattern, substitution in substitution_pipeline:
content = re.sub(pattern, substitution, content)
# Write the tab
lines.append(f"````{{tab}} {os}")
lines.append(f"```{highlighter}")
lines.append(f"{content}")
lines.append("```")
lines.append("````")
string_list = StringList(lines)
self.state.nested_parse(string_list, 0, node)
return [node]
def setup(app: Sphinx) -> Dict[str, Union[bool, str]]:
app.add_directive("pip-command-usage", PipCommandUsage)
app.add_directive("pip-command-description", PipCommandDescription)
app.add_directive("pip-command-options", PipCommandOptions)
app.add_directive("pip-general-options", PipGeneralOptions)
app.add_directive("pip-index-options", PipIndexOptions)
app.add_directive(
"pip-requirements-file-options-ref-list", PipReqFileOptionsReference
)
app.add_directive("pip-news-include", PipNewsInclude)
app.add_directive("pip-cli", PipCLIDirective)
return {
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| mit |
uladz/sahara-image-elements | setup.py | 334 | 1028 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)
| apache-2.0 |
N3MIS15/maraschino-webcam | modules/xbmc_notify.py | 1 | 1958 | from flask import Flask, jsonify, render_template, request
import os
from maraschino import app, RUNDIR, logger
from socket import *
from xbmc.xbmcclient import *
from maraschino.tools import get_file_list
from maraschino.models import XbmcServer
@app.route('/xhr/xbmc_notify', methods=['post'])
def xhr_notify():
label = request.form['label']
hostname = request.form['hostname']
dir = os.path.join(RUNDIR, 'static', 'images', 'notifications')
icons = get_file_list(
folder = dir,
extensions = ['.png', '.jpg'],
prepend_path = False,
)
return render_template('dialogs/xbmc_notify_dialog.html',
label = label,
hostname = hostname,
icons = icons,
)
@app.route('/xhr/xbmc_notify/send', methods=['post'])
def xhr_notify_message():
label = str(request.form['label'])
hostname = str(request.form['hostname'])
message = str(request.form['message'])
title = str(request.form['title'])
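# XBMC's EventServer listens for UDP notification packets on this port by default.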
port = 9777
icon = os.path.join(RUNDIR, 'static', 'images', 'notifications', request.form['image'])
if title == "Title":
title = "Maraschino"
if not os.path.exists(icon):
icon = os.path.join(RUNDIR, 'static', 'images', 'maraschino_logo.png')
if icon[-3:] == "png":
icon_type = ICON_PNG
elif icon[-3:] == "jpg":
icon_type = ICON_JPEG
elif icon[-4:] == "jpeg":
icon_type = ICON_JPEG
elif icon[-3:] == "gif":
icon_type = ICON_GIF
else:
icon_type = ICON_NONE
addr = (hostname, port)
sock = socket(AF_INET,SOCK_DGRAM)
try:
logger.log('NOTIFY XBMC :: Sending message to %s' % label, 'INFO')
packet = PacketNOTIFICATION(title, message, icon_type, icon)
packet.send(sock, addr)
return jsonify({ 'status': 'successful'})
except:
logger.log('NOTIFY XBMC :: Message failed to send', 'ERROR')
return jsonify({ 'error': 'Message failed to send'})
| mit |
quxiaolong1504/django | django/conf/locale/sk/formats.py | 504 | 1173 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
lorentey/swift | utils/build_swift/tests/expected_options.py | 2 | 24101 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import multiprocessing
from swift_build_support.swift_build_support import host
from swift_build_support.swift_build_support import targets
from .. import argparse
from .. import defaults
__all__ = [
'HelpOption',
'SetOption',
'SetTrueOption',
'SetFalseOption',
'DisableOption',
'EnableOption',
'ChoicesOption',
'IntOption',
'StrOption',
'PathOption',
'AppendOption',
'UnsupportedOption',
'IgnoreOption',
'EXPECTED_OPTIONS',
'EXPECTED_DEFAULTS',
]
# -----------------------------------------------------------------------------
EXPECTED_DEFAULTS = {
'android': False,
'android_api_level': '21',
'android_deploy_device_path': '/data/local/tmp',
'android_icu_i18n': None,
'android_icu_i18n_include': None,
'android_icu_uc': None,
'android_icu_uc_include': None,
'android_icu_data': None,
'android_ndk': None,
'android_ndk_gcc_version': '4.9',
'android_arch': 'armv7',
'assertions': True,
'benchmark': False,
'benchmark_num_o_iterations': 3,
'benchmark_num_onone_iterations': 3,
'build_android': False,
'build_args': [],
'build_benchmarks': True,
'build_clang_tools_extra': True,
'build_cygwin': True,
'build_external_benchmarks': False,
'build_foundation': False,
'build_freebsd': True,
'build_ios': True,
'build_ios_device': False,
'build_ios_simulator': False,
'build_jobs': multiprocessing.cpu_count(),
'build_libdispatch': False,
'build_libicu': False,
'build_linux': True,
'build_llbuild': False,
'build_lldb': False,
'build_libcxx': False,
'build_ninja': False,
'build_osx': True,
'build_playgroundsupport': False,
'build_runtime_with_host_compiler': False,
'build_stdlib_deployment_targets': ['all'],
'build_subdir': None,
'build_swift_dynamic_sdk_overlay': True,
'build_swift_dynamic_stdlib': True,
'build_swift_static_sdk_overlay': False,
'build_swift_static_stdlib': False,
'build_swift_stdlib_unittest_extra': False,
'build_swiftpm': False,
'build_swiftsyntax': False,
'build_libparser_only': False,
'build_skstresstester': False,
'build_swiftevolve': False,
'build_indexstoredb': False,
'build_sourcekitlsp': False,
'install_swiftpm': False,
'install_swiftsyntax': False,
'skip_install_swiftsyntax_module': False,
'swiftsyntax_verify_generated_files': False,
'install_sourcekitlsp': False,
'install_skstresstester': False,
'install_swiftevolve': False,
'build_toolchainbenchmarks': False,
'build_tvos': True,
'build_tvos_device': False,
'build_tvos_simulator': False,
'build_variant': 'Debug',
'build_watchos': True,
'build_watchos_device': False,
'build_watchos_simulator': False,
'build_xctest': False,
'cmake_c_launcher': None,
'cmake_cxx_launcher': None,
'clang_compiler_version': None,
'clang_profile_instr_use': None,
'clang_user_visible_version': defaults.CLANG_USER_VISIBLE_VERSION,
'clean': False,
'cmake': None,
'cmake_generator': 'Ninja',
'cmark_assertions': True,
'cmark_build_variant': 'Debug',
'compiler_vendor': defaults.COMPILER_VENDOR,
'coverage_db': None,
'cross_compile_hosts': [],
'darwin_deployment_version_ios':
defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
'darwin_deployment_version_osx':
defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
'darwin_deployment_version_tvos':
defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
'darwin_deployment_version_watchos':
defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
'darwin_xcrun_toolchain': None,
'distcc': False,
'dry_run': False,
'enable_asan': False,
'enable_experimental_differentiable_programming': True,
'enable_lsan': False,
'enable_sanitize_coverage': False,
'disable_guaranteed_normal_arguments': False,
'enable_stdlibcore_exclusivity_checking': False,
'enable_tsan': False,
'enable_tsan_runtime': False,
'enable_ubsan': False,
'export_compile_commands': False,
'extra_cmake_options': [],
'extra_swift_args': [],
'force_optimized_typechecker': False,
'foundation_build_variant': 'Debug',
'host_cc': None,
'host_cxx': None,
'host_libtool': None,
'host_lipo': None,
'host_target': targets.StdlibDeploymentTarget.host_target().name,
'host_test': False,
'only_executable_test': False,
'install_prefix': targets.install_prefix(),
'install_symroot': None,
'install_destdir': None,
'ios': False,
'ios_all': False,
'legacy_impl': False,
'libdispatch_build_variant': 'Debug',
'libicu_build_variant': 'Debug',
'lit_args': '-sv',
'llbuild_assertions': True,
'lldb_assertions': True,
'lldb_build_variant': 'Debug',
'lldb_build_with_xcode': '0',
'llvm_assertions': True,
'llvm_build_variant': 'Debug',
'llvm_max_parallel_lto_link_jobs':
host.max_lto_link_job_counts()['llvm'],
'llvm_targets_to_build': 'X86;ARM;AArch64;PowerPC;SystemZ;Mips',
'tsan_libdispatch_test': False,
'long_test': False,
'lto_type': None,
'dump_config': False,
'show_sdks': False,
'skip_build': False,
'skip_local_build': False,
'stdlib_deployment_targets': None,
'stress_test': False,
'swift_analyze_code_coverage': defaults.SWIFT_ANALYZE_CODE_COVERAGE,
'swift_assertions': True,
'swift_build_variant': 'Debug',
'swift_compiler_version': None,
'swift_darwin_module_archs': None,
'swift_darwin_supported_archs': None,
'swift_stdlib_assertions': True,
'swift_stdlib_build_variant': 'Debug',
'swift_tools_max_parallel_lto_link_jobs':
host.max_lto_link_job_counts()['swift'],
'swift_user_visible_version': defaults.SWIFT_USER_VISIBLE_VERSION,
'symbols_package': None,
'test': None,
'test_android': False,
'test_android_host': False,
'test_cygwin': False,
'test_freebsd': False,
'test_ios': False,
'test_ios_32bit_simulator': True,
'test_ios_host': False,
'test_ios_simulator': False,
'test_linux': False,
'test_optimize_for_size': None,
'test_optimize_none_with_implicit_dynamic': None,
'test_optimized': None,
'test_osx': False,
'test_paths': [],
'test_tvos': False,
'test_tvos_host': False,
'test_tvos_simulator': False,
'test_watchos': False,
'test_watchos_host': False,
'test_watchos_simulator': False,
'test_swiftpm': False,
'test_swiftsyntax': False,
'test_indexstoredb': False,
'test_sourcekitlsp': False,
'test_skstresstester': False,
'test_swiftevolve': False,
'test_toolchainbenchmarks': False,
'tvos': False,
'tvos_all': False,
'validation_test': None,
'verbose_build': False,
'watchos': False,
'watchos_all': False
}
# -----------------------------------------------------------------------------
def _sanitize_option_string(option_string):
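# e.g. '--host-test' -> 'host_test', '-j' -> 'j'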
if option_string.startswith('--'):
return option_string[2:].replace('-', '_')
if len(option_string) == 2 and option_string[0] == '-':
return option_string[1]
raise ValueError('invalid option_string format: ' + option_string)
class _BaseOption(object):
def __init__(self, option_string, dest=None, default=None):
if dest is None:
dest = _sanitize_option_string(option_string)
if default is None:
default = EXPECTED_DEFAULTS.get(dest, None)
self.option_string = option_string
self.dest = dest
self.default = default
def sanitized_string(self):
return _sanitize_option_string(self.option_string)
class HelpOption(_BaseOption):
"""Option that prints the help message and exits."""
pass
class SetOption(_BaseOption):
"""Option that accepts no arguments, setting the destination to a
hard-coded value or None.
"""
def __init__(self, *args, **kwargs):
self.value = kwargs.pop('value', None)
super(SetOption, self).__init__(*args, **kwargs)
class SetTrueOption(_BaseOption):
"""Option that accepts no arguments, setting the destination value to True
if parsed and defaulting to False otherwise.
"""
pass
class SetFalseOption(_BaseOption):
"""Option that accepts no arguments, setting the destination value to False
if parsed and defaulting to True otherwise.
"""
pass
class EnableOption(_BaseOption):
"""Option that sets the destination to True when parsed and False by default.
Can be toggled True or False with an optional bool argument.
"""
pass
class DisableOption(_BaseOption):
"""Option that sets the destination to False when parsed and True by default.
Can be toggled True or False with an optional bool argument, which is then
negated. Thus if an option is passed the value 'True' it will set the
destination to False and vice versa.
"""
pass
class ChoicesOption(_BaseOption):
"""Option that accepts an argument from a predefined list of choices."""
def __init__(self, *args, **kwargs):
self.choices = kwargs.pop('choices', None)
super(ChoicesOption, self).__init__(*args, **kwargs)
class IntOption(_BaseOption):
"""Option that accepts an int argument."""
pass
class StrOption(_BaseOption):
"""Option that accepts a str argument."""
pass
class PathOption(_BaseOption):
"""Option that accepts a path argument."""
pass
class AppendOption(_BaseOption):
"""Option that can be called more than once to append arguments to an
internal list.
"""
pass
class UnsupportedOption(_BaseOption):
"""Option that is not supported."""
pass
class IgnoreOption(_BaseOption):
"""Option that should be ignored when generating tests. Instead a test
should be written manually as the behavior cannot or should not be auto-
generated.
"""
pass
class BuildScriptImplOption(_BaseOption):
"""Option that gets forwarded to build-script-impl by migration.py and is
only listed for disambiguation by argparse.
"""
pass
# -----------------------------------------------------------------------------
EXPECTED_OPTIONS = [
# Ignore the help options since they always call sys.exit(0)
HelpOption('-h', dest='help', default=argparse.SUPPRESS),
HelpOption('--help', dest='help', default=argparse.SUPPRESS),
SetOption('--debug', dest='build_variant', value='Debug'),
SetOption('--debug-cmark', dest='cmark_build_variant', value='Debug'),
SetOption('--debug-foundation',
dest='foundation_build_variant', value='Debug'),
SetOption('--debug-libdispatch',
dest='libdispatch_build_variant', value='Debug'),
SetOption('--debug-libicu', dest='libicu_build_variant', value='Debug'),
SetOption('--debug-lldb', dest='lldb_build_variant', value='Debug'),
SetOption('--lldb-build-with-xcode', dest='lldb_build_with_xcode',
value='1'),
SetOption('--lldb-build-with-cmake', dest='lldb_build_with_xcode',
value='0'),
SetOption('--debug-llvm', dest='llvm_build_variant', value='Debug'),
SetOption('--debug-swift', dest='swift_build_variant', value='Debug'),
SetOption('--debug-swift-stdlib',
dest='swift_stdlib_build_variant', value='Debug'),
SetOption('--eclipse',
dest='cmake_generator', value='Eclipse CDT4 - Ninja'),
SetOption('--make', dest='cmake_generator', value='Unix Makefiles'),
SetOption('--release', dest='build_variant', value='Release'),
SetOption('--release-debuginfo',
dest='build_variant', value='RelWithDebInfo'),
SetOption('--xcode', dest='cmake_generator', value='Xcode'),
SetOption('-R', dest='build_variant', value='Release'),
SetOption('-d', dest='build_variant', value='Debug'),
SetOption('-e', dest='cmake_generator', value='Eclipse CDT4 - Ninja'),
SetOption('-m', dest='cmake_generator', value='Unix Makefiles'),
SetOption('-r', dest='build_variant', value='RelWithDebInfo'),
SetOption('-x', dest='cmake_generator', value='Xcode'),
# FIXME: Convert these options to set_true actions
SetOption('--assertions', value=True),
SetOption('--cmark-assertions', value=True),
SetOption('--lldb-assertions', value=True),
SetOption('--llvm-assertions', value=True),
SetOption('--llbuild-assertions', value=True),
SetOption('--swift-assertions', value=True),
SetOption('--swift-stdlib-assertions', value=True),
SetOption('-T', dest='validation_test', value=True),
SetOption('-o', dest='test_optimized', value=True),
SetOption('-s', dest='test_optimize_for_size', value=True),
SetOption('-y',
dest='test_optimize_none_with_implicit_dynamic', value=True),
SetOption('-t', dest='test', value=True),
SetOption('-a', dest='assertions', value=True),
# FIXME: Convert these options to set_false actions
SetOption('--no-assertions', dest='assertions', value=False),
SetOption('-A', dest='assertions', value=False),
SetOption('--no-lldb-assertions', dest='lldb_assertions', value=False),
SetOption('--no-llvm-assertions', dest='llvm_assertions', value=False),
SetOption('--no-llbuild-assertions',
dest='llbuild_assertions', value=False),
SetOption('--no-swift-assertions', dest='swift_assertions', value=False),
SetOption('--no-swift-stdlib-assertions',
dest='swift_stdlib_assertions', value=False),
SetOption('--skip-ios', dest='ios', value=False),
SetOption('--skip-tvos', dest='tvos', value=False),
SetOption('--skip-watchos', dest='watchos', value=False),
SetTrueOption('--benchmark'),
SetTrueOption('--clean'),
SetTrueOption('--dry-run'),
SetTrueOption('--dump-config'),
SetTrueOption('--disable-guaranteed-normal-arguments'),
SetTrueOption('--enable-stdlibcore-exclusivity-checking'),
SetTrueOption('--force-optimized-typechecker'),
SetTrueOption('--ios'),
SetTrueOption('--llbuild', dest='build_llbuild'),
SetTrueOption('--lldb', dest='build_lldb'),
SetTrueOption('--libcxx', dest='build_libcxx'),
SetTrueOption('--playgroundsupport', dest='build_playgroundsupport'),
SetTrueOption('--skip-build'),
SetTrueOption('--swiftpm', dest='build_swiftpm'),
SetTrueOption('--swiftsyntax', dest='build_swiftsyntax'),
SetTrueOption('--build-libparser-only', dest='build_libparser_only'),
SetTrueOption('--skstresstester', dest='build_skstresstester'),
SetTrueOption('--swiftevolve', dest='build_swiftevolve'),
SetTrueOption('-B', dest='benchmark'),
SetTrueOption('-S', dest='skip_build'),
SetTrueOption('-b', dest='build_llbuild'),
SetTrueOption('-c', dest='clean'),
SetTrueOption('-i', dest='ios'),
SetTrueOption('-l', dest='build_lldb'),
SetTrueOption('-n', dest='dry_run'),
SetTrueOption('-p', dest='build_swiftpm'),
SetTrueOption('--legacy-impl', dest='legacy_impl'),
EnableOption('--android'),
EnableOption('--build-external-benchmarks'),
EnableOption('--build-ninja'),
EnableOption('--build-runtime-with-host-compiler'),
EnableOption('--build-swift-dynamic-sdk-overlay'),
EnableOption('--build-swift-dynamic-stdlib'),
EnableOption('--build-swift-static-sdk-overlay'),
EnableOption('--build-swift-static-stdlib'),
EnableOption('--build-swift-stdlib-unittest-extra'),
EnableOption('--distcc'),
EnableOption('--enable-asan'),
EnableOption('--enable-experimental-differentiable-programming'),
EnableOption('--enable-lsan'),
EnableOption('--enable-sanitize-coverage'),
EnableOption('--enable-tsan'),
EnableOption('--enable-tsan-runtime'),
EnableOption('--enable-ubsan'),
EnableOption('--export-compile-commands'),
EnableOption('--foundation', dest='build_foundation'),
EnableOption('--host-test'),
EnableOption('--only-executable-test'),
EnableOption('--libdispatch', dest='build_libdispatch'),
EnableOption('--libicu', dest='build_libicu'),
EnableOption('--indexstore-db', dest='build_indexstoredb'),
EnableOption('--sourcekit-lsp', dest='build_sourcekitlsp'),
EnableOption('--install-swiftsyntax', dest='install_swiftsyntax'),
EnableOption('--skip-install-swiftsyntax-module',
dest='skip_install_swiftsyntax_module'),
EnableOption('--swiftsyntax-verify-generated-files',
dest='swiftsyntax_verify_generated_files'),
EnableOption('--install-swiftpm', dest='install_swiftpm'),
EnableOption('--install-sourcekit-lsp', dest='install_sourcekitlsp'),
EnableOption('--install-skstresstester', dest='install_skstresstester'),
EnableOption('--install-swiftevolve', dest='install_swiftevolve'),
EnableOption('--toolchain-benchmarks', dest='build_toolchainbenchmarks'),
EnableOption('--tsan-libdispatch-test'),
EnableOption('--long-test'),
EnableOption('--show-sdks'),
EnableOption('--skip-local-build'),
EnableOption('--stress-test'),
EnableOption('--test'),
EnableOption('--test-optimize-for-size'),
EnableOption('--test-optimize-none-with-implicit-dynamic'),
EnableOption('--test-optimized'),
EnableOption('--tvos'),
EnableOption('--validation-test'),
EnableOption('--verbose-build'),
EnableOption('--watchos'),
EnableOption('--xctest', dest='build_xctest'),
DisableOption('--skip-build-android', dest='build_android'),
DisableOption('--skip-build-benchmarks', dest='build_benchmarks'),
DisableOption('--skip-build-cygwin', dest='build_cygwin'),
DisableOption('--skip-build-freebsd', dest='build_freebsd'),
DisableOption('--skip-build-ios', dest='build_ios'),
DisableOption('--skip-build-ios-device', dest='build_ios_device'),
DisableOption('--skip-build-ios-simulator',
dest='build_ios_simulator'),
DisableOption('--skip-build-linux', dest='build_linux'),
DisableOption('--skip-build-osx', dest='build_osx'),
DisableOption('--skip-build-tvos', dest='build_tvos'),
DisableOption('--skip-build-tvos-device', dest='build_tvos_device'),
DisableOption('--skip-build-tvos-simulator',
dest='build_tvos_simulator'),
DisableOption('--skip-build-watchos', dest='build_watchos'),
DisableOption('--skip-build-watchos-device',
dest='build_watchos_device'),
DisableOption('--skip-build-watchos-simulator',
dest='build_watchos_simulator'),
DisableOption('--skip-test-android', dest='test_android'),
DisableOption('--skip-test-android-host', dest='test_android_host'),
DisableOption('--skip-test-cygwin', dest='test_cygwin'),
DisableOption('--skip-test-freebsd', dest='test_freebsd'),
DisableOption('--skip-test-ios', dest='test_ios'),
DisableOption('--skip-test-ios-32bit-simulator',
dest='test_ios_32bit_simulator'),
DisableOption('--skip-test-ios-host', dest='test_ios_host'),
DisableOption('--skip-test-ios-simulator', dest='test_ios_simulator'),
DisableOption('--skip-test-linux', dest='test_linux'),
DisableOption('--skip-test-osx', dest='test_osx'),
DisableOption('--skip-test-tvos', dest='test_tvos'),
DisableOption('--skip-test-tvos-host', dest='test_tvos_host'),
DisableOption('--skip-test-tvos-simulator',
dest='test_tvos_simulator'),
DisableOption('--skip-test-watchos', dest='test_watchos'),
DisableOption('--skip-test-watchos-host', dest='test_watchos_host'),
DisableOption('--skip-test-watchos-simulator',
dest='test_watchos_simulator'),
DisableOption('--skip-test-swiftpm', dest='test_swiftpm'),
DisableOption('--skip-test-swiftsyntax', dest='test_swiftsyntax'),
DisableOption('--skip-test-indexstore-db', dest='test_indexstoredb'),
DisableOption('--skip-test-sourcekit-lsp', dest='test_sourcekitlsp'),
DisableOption('--skip-test-skstresstester', dest='test_skstresstester'),
DisableOption('--skip-test-swiftevolve', dest='test_swiftevolve'),
DisableOption('--skip-test-toolchain-benchmarks',
dest='test_toolchainbenchmarks'),
DisableOption('--skip-build-clang-tools-extra',
dest='build_clang_tools_extra'),
ChoicesOption('--android-ndk-gcc-version',
choices=['4.8', '4.9']),
ChoicesOption('--compiler-vendor',
choices=['none', 'apple']),
ChoicesOption('--swift-analyze-code-coverage',
choices=['false', 'not-merged', 'merged']),
ChoicesOption('--android-arch',
choices=['armv7', 'aarch64']),
StrOption('--android-api-level'),
StrOption('--build-args'),
StrOption('--build-stdlib-deployment-targets'),
StrOption('--darwin-deployment-version-ios'),
StrOption('--darwin-deployment-version-osx'),
StrOption('--darwin-deployment-version-tvos'),
StrOption('--darwin-deployment-version-watchos'),
StrOption('--darwin-xcrun-toolchain'),
StrOption('--host-target'),
StrOption('--lit-args'),
StrOption('--llvm-targets-to-build'),
StrOption('--stdlib-deployment-targets'),
StrOption('--swift-darwin-module-archs'),
StrOption('--swift-darwin-supported-archs'),
PathOption('--android-deploy-device-path'),
PathOption('--android-icu-i18n'),
PathOption('--android-icu-i18n-include'),
PathOption('--android-icu-uc'),
PathOption('--android-icu-uc-include'),
PathOption('--android-icu-data'),
PathOption('--android-ndk'),
PathOption('--build-subdir'),
PathOption('--clang-profile-instr-use'),
PathOption('--cmake'),
PathOption('--coverage-db'),
PathOption('--host-cc'),
PathOption('--host-cxx'),
PathOption('--host-libtool'),
PathOption('--host-lipo'),
PathOption('--install-prefix'),
PathOption('--install-symroot'),
PathOption('--install-destdir'),
PathOption('--symbols-package'),
PathOption('--cmake-c-launcher'),
PathOption('--cmake-cxx-launcher'),
IntOption('--benchmark-num-o-iterations'),
IntOption('--benchmark-num-onone-iterations'),
IntOption('--jobs', dest='build_jobs'),
IntOption('--llvm-max-parallel-lto-link-jobs'),
IntOption('--swift-tools-max-parallel-lto-link-jobs'),
IntOption('-j', dest='build_jobs'),
AppendOption('--cross-compile-hosts'),
AppendOption('--extra-cmake-options'),
AppendOption('--extra-swift-args'),
AppendOption('--test-paths'),
UnsupportedOption('--build-jobs'),
UnsupportedOption('--common-cmake-options'),
UnsupportedOption('--only-execute'),
UnsupportedOption('--skip-test-optimize-for-size'),
UnsupportedOption('--skip-test-optimize-none-with-implicit-dynamic'),
UnsupportedOption('--skip-test-optimized'),
# Options forwarded to build-script-impl
BuildScriptImplOption('--skip-test-swift', dest='impl_skip_test_swift'),
BuildScriptImplOption('--install-swift', dest='impl_install_swift'),
# NOTE: LTO flag is a special case that acts both as an option and has
# valid choices
SetOption('--lto', dest='lto_type'),
ChoicesOption('--lto', dest='lto_type', choices=['thin', 'full']),
# NOTE: We'll need to manually test the behavior of these since they
# validate compiler version strings.
IgnoreOption('--clang-compiler-version'),
IgnoreOption('--clang-user-visible-version'),
IgnoreOption('--swift-compiler-version'),
IgnoreOption('--swift-user-visible-version'),
# TODO: Migrate to unavailable options once new parser is in place
IgnoreOption('-I'),
IgnoreOption('--ios-all'),
IgnoreOption('--tvos-all'),
IgnoreOption('--watchos-all'),
]
| apache-2.0 |
napkindrawing/ansible | lib/ansible/plugins/shell/csh.py | 69 | 1478 | # (c) 2014, Chris Church <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.shell import ShellBase
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles
COMPATIBLE_SHELLS = frozenset(('csh', 'tcsh'))
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'csh'
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\\\n'
_SHELL_REDIRECT_ALLNULL = '>& /dev/null'
_SHELL_AND = '&&'
_SHELL_OR = '||'
_SHELL_SUB_LEFT = '"`'
_SHELL_SUB_RIGHT = '`"'
_SHELL_GROUP_LEFT = '('
_SHELL_GROUP_RIGHT = ')'
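# csh cannot prefix a command with VAR=value assignments, so environment variables are passed through env(1) instead.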
def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
| gpl-3.0 |
rbarlow/pulp | client_consumer/test/unit/test_exception_handler.py | 15 | 1122 | from pulp.bindings import exceptions as bindings_exceptions
from pulp.client.consumer.exception_handler import ConsumerExceptionHandler
from pulp.client.extensions import exceptions
from pulp.client.extensions.core import TAG_FAILURE
from pulp.devel.unit import base
class ConsumerExceptionHandlerTests(base.PulpClientTests):
def setUp(self):
super(ConsumerExceptionHandlerTests, self).setUp()
self.handler = ConsumerExceptionHandler(self.prompt, self.config)
def test_permission(self):
"""
Tests a client-side error when the connection is rejected due to auth reasons.
"""
# Test
response_body = {'auth_error_code': 'authentication_failed'}
e = bindings_exceptions.PermissionsException(response_body)
e.error_message = "I've made a huge mistake."
code = self.handler.handle_permission(e)
# Verify
self.assertEqual(code, exceptions.CODE_PERMISSIONS_EXCEPTION)
self.assertTrue("I've made a huge mistake.\n" == self.recorder.lines[0])
self.assertEqual(TAG_FAILURE, self.prompt.get_write_tags()[0])
| gpl-2.0 |
Elucidation/ChessboardDetect | board_detect.py | 1 | 14132 | import cv2
import PIL.Image
import numpy as np
import sys
np.set_printoptions(suppress=True, precision=2)
def scaleImageIfNeeded(img, max_width=1024, max_height=1024):
"""Scale image down to max_width / max_height keeping aspect ratio if needed. Do nothing otherwise."""
# Input and Output is a numpy array
img = PIL.Image.fromarray(img)
img_width, img_height = img.size
print("Image size %dx%d" % (img_width, img_height))
aspect_ratio = min(float(max_width)/img_width, float(max_height)/img_height)
if aspect_ratio < 1.0:
new_width, new_height = ((np.array(img.size) * aspect_ratio)).astype(int)
print(" Resizing to %dx%d" % (new_width, new_height))
return np.array(img.resize((new_width,new_height)))
return np.array(img)
def getAngle(a,b,c):
# Get angle given 3 side lengths, in degrees
return np.arccos((a*a+b*b-c*c) / (2*a*b)) * 180.0 / np.pi
def getSegmentThetaRho(line):
x1,y1,x2,y2 = line
theta = np.math.atan2(y2-y1, x2-x1)
m = np.tan(theta)
# rho = np.abs(y1 + m*x1) / np.sqrt(m*m+1)
rho = x1*np.cos(theta) + y1*np.sin(theta)
return theta, rho
def getTwoLineSegmentIntersection(p,pr,q,qs):
# Uses http://stackoverflow.com/a/565282/2574639
# Given two line segments defined by sets of points
# p - pr and q - qs.
# Return the intersection point between them
# *assumes it always exists for our particular use-case*
# Convert to floats
p = p.astype(np.float32)
pr = pr.astype(np.float32)
q = q.astype(np.float32)
qs = qs.astype(np.float32)
r = pr-p
s = qs-q
# print(p, pr, r)
# print(q, qs, s)
rxs = np.cross(r, s)
if rxs == 0:
return [] # parallel
t = np.cross((q - p), s) / rxs
return p + t*r # intersect
def testTwoLineSegmentIntersection():
print("Test Two Line Segment Intersection")
a = np.array([0,0])
b = np.array([0,2])
c = np.array([1,0])
d = np.array([-1,1])
t = getTwoLineSegmentIntersection(a,b,c,d)
print(t)
print("Done")
def getSegmentTheta(line):
x1,y1,x2,y2 = line
theta = np.math.atan2(y2-y1, x2-x1)
return theta
def getSquareness(cnt, perfect_square_threshold=0.96):
# 4x2 array, rows are each point, columns are x and y
center = cnt.sum(axis=0)/4
# Side lengths of rectangular contour
dd0 = np.sqrt(((cnt[0,:] - cnt[1,:])**2).sum())
dd1 = np.sqrt(((cnt[1,:] - cnt[2,:])**2).sum())
dd2 = np.sqrt(((cnt[2,:] - cnt[3,:])**2).sum())
dd3 = np.sqrt(((cnt[3,:] - cnt[0,:])**2).sum())
# diagonal ratio
# xa = np.sqrt(((cnt[0,:] - cnt[2,:])**2).sum())
# xb = np.sqrt(((cnt[1,:] - cnt[3,:])**2).sum())
# xratio = xa/xb if xa < xb else xb/xa
side_ratio = dd0/dd1 if dd0 < dd1 else dd1/dd0
if side_ratio > perfect_square_threshold:
side_ratio = 1.0
return side_ratio
def is_square(cnt, eps=3.0, xratio_thresh = 0.5):
# 4x2 array, rows are each point, columns are x and y
center = cnt.sum(axis=0)/4
# Side lengths of rectangular contour
dd0 = np.sqrt(((cnt[0,:] - cnt[1,:])**2).sum())
dd1 = np.sqrt(((cnt[1,:] - cnt[2,:])**2).sum())
dd2 = np.sqrt(((cnt[2,:] - cnt[3,:])**2).sum())
dd3 = np.sqrt(((cnt[3,:] - cnt[0,:])**2).sum())
# diagonal ratio
xa = np.sqrt(((cnt[0,:] - cnt[2,:])**2).sum())
xb = np.sqrt(((cnt[1,:] - cnt[3,:])**2).sum())
xratio = xa/xb if xa < xb else xb/xa
# Check whether all points are part of the convex hull,
# i.e. not a shape like http://i.stack.imgur.com/I6yJY.png
# All corner angles; these are less than 180 deg, so not necessarily internal angles
ta = getAngle(dd3, dd0, xb)
tb = getAngle(dd0, dd1, xa)
tc = getAngle(dd1, dd2, xb)
td = getAngle(dd2, dd3, xa)
angle_sum = np.round(ta+tb+tc+td)
# All corner angles must be greater than 35 degrees and less than 140 degrees
good_angles = np.all(np.array([ta,tb,tc,td]) > 35) and np.all(np.array([ta,tb,tc,td]) < (140))
# side ratios
dda = dd0 / dd1
ddb = dd1 / dd2
ddc = dd0/dd2
ddd = dd1/dd3
# Return whether side ratios within certain ratio < epsilon
return (abs(1.0 - dda) < eps and abs(1.0 - ddb) < eps and
abs(1.0 - ddc) < 0.5 and abs(1.0 - ddd) < 0.5 and
xratio > xratio_thresh and angle_sum == 360 and good_angles)
def minimum_distance2(v, w, p):
# Return the squared minimum distance between point p and line segment vw
# Via http://stackoverflow.com/a/1501725 (note: this returns the squared distance, not the distance itself)
l2 = np.sum((v - w)**2) # i.e. |w-v|^2 - avoid a sqrt
if (l2 == 0.0):
return np.sum((p - v)**2) # v == w case
# Consider the line extending the segment, parameterized as v + t (w - v).
# We find projection of point p onto the line.
# It falls where t = [(p-v) . (w-v)] / |w-v|^2
# We clamp t from [0,1] to handle points outside the segment vw.
t = max(0, min(1, np.dot(p - v, w - v) / l2))
projection = v + t * (w - v) # Projection falls on the segment
return np.sum((p - projection)**2)
def testMinDist():
print("Test min dist")
a = np.array([0,0])
b = np.array([0,1.3])
c = np.array([1.3,0.4])
print(np.sqrt(minimum_distance2(a,b,c)))
def getMinLineAngleDistance(a0, a1):
# Compare line angles (which can be 180 off from one another, or +- 180)
v0 = abs(a1-a0)
v1 = abs((a1+np.pi) - a0)
v2 = abs(a1 - (a0+np.pi))
return min([v0,v1,v2])
def getBestCorners(tile_corners, hough_lines, angle_threshold = 10*np.pi/180):
# Given 4x2 imperfect tile corners and Nx4 line segments
# Expects line segments and corner points to be in same cartesian space
#
# Find 4 best line segments that are best match to the tile corners
# and return the corners based off of those line segments, and those line segments
best_lines = np.zeros([4,4])
for i in range(4):
corner_theta = getSegmentTheta(tile_corners[[i,i,((i+1)%4),((i+1)%4)], [0,1,0,1]])
corner_ctr_pt = (tile_corners[i,:] + tile_corners[((i+1)%4),:]) / 2
best_d = 1e6
for line in hough_lines:
theta = getSegmentTheta(line)
# If angle within 10 degrees
# if abs(corner_theta - theta) < angle_threshold:
if getMinLineAngleDistance(corner_theta, theta) < angle_threshold:
d = minimum_distance2(line[:2], line[2:], corner_ctr_pt)
if d < best_d:
best_d = d
best_lines[i,:] = line
new_corners = tile_corners.copy()
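# Each refined corner is the intersection of the two adjacent best-fit lines; the original corner is kept when those lines are parallel (or no matching line was found).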
for i in range(4):
x = getTwoLineSegmentIntersection(
best_lines[i,:2], best_lines[i,2:],
best_lines[(i+1)%4,:2], best_lines[(i+1)%4,2:])
# print(best_lines, x)
# print(best_lines[i,:2], best_lines[i,2:], best_lines[(i+1)%4,:2], best_lines[(i+1)%4,2:])
if any(x):
new_corners[i,:] = x
return new_corners, best_lines
def findPotentialTiles(img):
# blur img
# img = (1.2*img - 0.2*cv2.blur(img,(3,3))).astype(np.uint8)
img = cv2.bilateralFilter(img,3, 25, 75)
# img = cv2.medianBlur(img,3)
thresh = 100
edges_orig = cv2.Canny(img, thresh, thresh*2)
# Morphological Gradient to get internal squares of canny edges.
# kernel = np.ones((5,5),np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
edges = cv2.morphologyEx(edges_orig, cv2.MORPH_GRADIENT, kernel)
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
contours = np.array(contours) # Turn to np array
# Get dimmed image
# img = (img.copy() * 0.9).astype(np.uint8)
good_tiles = np.zeros(len(contours), dtype=bool)
for i in range(contours.size):
# Keep only internal contours (Has parent with findContour using cv2.RETR_CCOMP)
if (hierarchy[0,i,3] < 0):
# No parent found, skip outer contour
continue
# Approximate contour and update in place
contours[i] = cv2.approxPolyDP(contours[i],0.02*cv2.arcLength(contours[i],True),True)
# Only contours that fill an area of at least 8x8 pixels
if cv2.contourArea(contours[i]) < 8*8:
continue
# Only rectangular contours allowed
if len(contours[i]) != 4:
continue
# If rectangle is not square enough (even with leeway for perspective warp), remove
if not is_square(contours[i][:,0,:]):
continue
# Survived tests, is good tile
good_tiles[i] = True
# Prune bad contours
contours = contours[good_tiles]
# Calculate contour areas, then choose most common area
areas = np.array(list(map(cv2.contourArea, contours)))
# Sort contours by area size (largest first)
area_max_order = np.argsort(areas)[::-1]
contours = contours[area_max_order]
areas = areas[area_max_order]
med_area = np.median(areas)
good_areas = np.abs(areas - med_area) < 0.5*med_area
contours = contours[good_areas]
# chosen_tile_idx = np.argsort(areas)[len(areas)//2]
squareness_list = list(map(getSquareness, contours))
# Sort contours by order of most square
contours = contours[np.argsort(squareness_list)[::-1]]
# Now contours are sorted by most square and largest area first
return contours, 0, edges_orig
def getChosenTile(contours, chosen_tile_idx):
return contours[chosen_tile_idx][:,0,:].astype(np.float32)
def drawPotentialTiles(img, contours, chosen_tile_idx):
tile_corners = getChosenTile(contours, chosen_tile_idx)
# Draw contours
font = cv2.FONT_HERSHEY_PLAIN
for i, cnt in enumerate(contours):
if i == chosen_tile_idx:
cv2.drawContours(img,[cnt],0,(0,255,0),-1)
else:
cv2.drawContours(img,[cnt],0,(0,0,255),-1)
cv2.line(img, tuple(tile_corners[0,:]), tuple(tile_corners[1,:]), (0,0,180), thickness=2)
cv2.line(img, tuple(tile_corners[1,:]), tuple(tile_corners[2,:]), (0,180,0), thickness=2)
cv2.line(img, tuple(tile_corners[2,:]), tuple(tile_corners[3,:]), (180,0,0), thickness=2)
cv2.line(img, tuple(tile_corners[3,:]), tuple(tile_corners[0,:]), (0,0,0), thickness=2)
cv2.putText(img,'0', tuple(contours[chosen_tile_idx][0,0,:]-5), font, 0.8,(0,0,0), thickness=1)
cv2.putText(img,'1', tuple(contours[chosen_tile_idx][1,0,:]-5), font, 0.8,(0,0,0), thickness=1)
cv2.putText(img,'2', tuple(contours[chosen_tile_idx][2,0,:]-5), font, 0.8,(0,0,0), thickness=1)
cv2.putText(img,'3', tuple(contours[chosen_tile_idx][3,0,:]-5), font, 0.8,(0,0,0), thickness=1)
def drawSquareness(img, contours):
squareness_list = np.array(list(map(getSquareness, contours)))
# print(squareness_list)
font = cv2.FONT_HERSHEY_PLAIN
for i, cnt in enumerate(contours):
cv2.putText(img,'%.2f'%squareness_list[i], tuple(contours[i][0,0,:]-5), font, 0.5,(0,0,0), thickness=1)
def refineTile(img, edges, contours, chosen_tile_idx):
tile_corners = getChosenTile(contours, chosen_tile_idx)
tile_size = tile_corners.max(axis=0) - tile_corners.min(axis=0)
tile_center = tile_corners.mean(axis=0)
bbox_size_ratio = 4
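# Search a region several tile-widths around the chosen tile so neighbouring board lines fall inside the Hough ROI.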
roi_bbox = np.hstack([tile_center-tile_size*bbox_size_ratio,tile_center+tile_size*bbox_size_ratio]).astype(int)
# clamp bbox to img edges
r,c,_ = img.shape
roi_bbox[roi_bbox<0]=0
roi_bbox[roi_bbox>[c,r,c,r]]= np.array([c,r,c,r])[roi_bbox>[c,r,c,r]]
cv2.rectangle(img,tuple(roi_bbox[:2]),tuple(roi_bbox[2:]),(0,255,0),3)
edges_roi = edges[ roi_bbox[1]:roi_bbox[3], roi_bbox[0]:roi_bbox[2] ]
tile_side = int(tile_size.min())
lines_roi = cv2.HoughLinesP(edges_roi,1,np.pi/180.0, tile_side, minLineLength=tile_side, maxLineGap=tile_side)
if not np.any(lines_roi):
print("No lines found")
return
lines_roi = lines_roi[:,0,:]
# for line in lines_roi:
# line = (line + roi_bbox[[0,1,0,1]]).astype(np.int)
# print("---")
hough_lines = np.add(lines_roi, roi_bbox[[0,1,0,1]])
hough_corners, corner_hough_lines = getBestCorners(tile_corners, hough_lines)
return hough_corners, corner_hough_lines, edges_roi
def drawBestHoughLines(img, hough_corners, corner_hough_lines):
# print(hough_corners)
# print(corner_hough_lines)
for line in corner_hough_lines:
cv2.line(img, tuple(line[:2].astype(np.int)), tuple(line[2:].astype(np.int)), (255,255,255), thickness=2)
for i in range(4):
cv2.circle(img,
tuple(hough_corners[i,:]), 1, (0,0,0),thickness=-1)
# print("---")
# Draw 2x-chessboard expanded tile using simplistic multiplier instead of perspective transform
# hough_tile_center = hough_corners.mean(axis=0)
# expanded_tile_corners = hough_tile_center + (hough_corners - hough_tile_center)*(16+4)
# cv2.polylines(img, [expanded_tile_corners.astype(np.int32)], True, (150,50,255), thickness=2)
# -8 to 8
# Single tile warp
# M = cv2.getPerspectiveTransform(hough_corners,
# (tile_res)*(ideal_tile+8+1))
# expanded tile area warp
# M = cv2.getPerspectiveTransform(expanded_tile_corners,
# (tile_res)*(ideal_tile*tile_buffer))
# print(M)
# side_len = tile_res*(tile_buffer)
# side_len = tile_res*(8 + 1 + tile_buffer)
# out_img = cv2.warpPerspective(img, M,
# (side_len, side_len))
def main(filenames):
for filename in filenames:
img = cv2.imread(filename)
img = scaleImageIfNeeded(img)
contours, chosen_tile_idx, edges = findPotentialTiles(img)
drawPotentialTiles(img, contours, chosen_tile_idx)
tile_corners = getChosenTile(contours, chosen_tile_idx)
hough_corners, corner_hough_lines, edges_roi = refineTile(img, edges, contours, chosen_tile_idx)
drawBestHoughLines(img, hough_corners, corner_hough_lines)
# # Single tile warp
# tile_res=64
# M = cv2.getPerspectiveTransform(hough_corners,
# (tile_res)*(ideal_tile+8+1))
# side_len = tile_res*(8 + 1 + tile_buffer)
# out_img = cv2.warpPerspective(img, M,
# (side_len, side_len))
drawSquareness(img, contours)
if img.size < 1000*1000:
img = cv2.resize(img,None,fx=2, fy=2)
edges_roi = cv2.resize(edges_roi,None,fx=2, fy=2)
cv2.imshow(filename,img)
cv2.imshow('edges',edges_roi)
# cv2.imshow('%s_warped' % filename,out_img)
# cv2.imshow('ROI',edges_roi)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
if len(sys.argv) > 1:
filenames = sys.argv[1:]
else:
filenames = ['input/2.jpg']
print("Loading", filenames)
main(filenames) | mit |
rabipanda/tensorflow | tensorflow/compiler/tests/reverse_sequence_op_test.py | 15 | 3457 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ReverseSequenceTest(XLATestCase):
def _testReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
expected_err_re=None):
with self.test_session():
p = array_ops.placeholder(dtypes.as_dtype(x.dtype))
lengths = array_ops.placeholder(dtypes.as_dtype(seq_lengths.dtype))
with self.test_scope():
ans = array_ops.reverse_sequence(
p, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=lengths)
if expected_err_re is None:
tf_ans = ans.eval(feed_dict={p: x, lengths: seq_lengths})
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval(feed_dict={p: x, lengths: seq_lengths})
def testSimple(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
expected = np.array([[1, 2, 3], [6, 5, 4], [8, 7, 9]], dtype=np.int32)
self._testReverseSequence(
x,
batch_axis=0,
seq_axis=1,
seq_lengths=np.array([1, 3, 2], np.int32),
truth=expected)
def _testBasic(self, dtype, len_dtype):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]]
], # reverse 0:4 (all)
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
def testSeqLength(self):
for dtype in self.all_types:
for seq_dtype in self.int_types:
self._testBasic(dtype, seq_dtype)
if __name__ == "__main__":
test.main()
| apache-2.0 |
dsyang/buck | third-party/py/pywatchman/pywatchman/capabilities.py | 29 | 2793 | # Copyright 2015 Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import re
def parse_version(vstr):
res = 0
for n in vstr.split('.'):
res = res * 1000
res = res + int(n)
return res
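# Added illustrative check (not part of upstream pywatchman): parse_version
# packs each dotted component into three decimal digits of an integer, which
# is the value check() below compares server versions against.
def _parse_version_examples():
    assert parse_version("3.1.1") == 3001001
    assert parse_version("3.7") == 3007
    assert parse_version("4.9.0") == 4009000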
cap_versions = {
"cmd-watch-del-all": "3.1.1",
"cmd-watch-project": "3.1",
"relative_root": "3.3",
"term-dirname": "3.1",
"term-idirname": "3.1",
"wildmatch": "3.7",
}
def check(version, name):
if name in cap_versions:
return version >= parse_version(cap_versions[name])
return False
def synthesize(vers, opts):
""" Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
"""
parsed_version = parse_version(vers['version'])
vers['capabilities'] = {}
for name in opts['optional']:
vers['capabilities'][name] = check(parsed_version, name)
failed = False
for name in opts['required']:
have = check(parsed_version, name)
vers['capabilities'][name] = have
if not have:
vers['error'] = 'client required capability `' + name + \
'` is not supported by this server'
return vers
| apache-2.0 |
calvingit21/h2o-2 | py/testdir_multi_jvm/notest_rf_10ktrees_fvec.py | 9 | 1680 | import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(2)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_10ktrees_fvec(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
# always match the run below!
# just using one file for now
for x in [1000]:
shCmdString = "perl " + h2o.find_file("syn_scripts/parity.pl") + " 128 4 "+ str(x) + " quad " + SYNDATASETS_DIR
h2o.spawn_cmd_and_wait('parity.pl', shCmdString.split(),4)
csvFilename = "parity_128_4_" + str(x) + "_quad.data"
# always match the gen above!
for trial in range (1,3):
sys.stdout.write('.')
sys.stdout.flush()
csvFilename = "parity_128_4_" + str(1000) + "_quad.data"
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
hex_key = csvFilename + "_" + str(trial) + ".hex"
parseResult = h2o_cmd.parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=30)
h2o.verboseprint("Trial", trial)
start = time.time()
h2o_cmd.runRF(parseResult=parseResult, trees=10000, max_depth=2, timeoutSecs=900, retryDelaySecs=3)
print "RF #", trial, "end on ", csvFilename, 'took', time.time() - start, 'seconds'
print "Waiting 60 secs for TIME_WAIT sockets to go away"
time.sleep(60)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
jackkiej/SickRage | lib/hachoir_parser/program/exe_ne.py | 95 | 3543 | from hachoir_core.field import (FieldSet,
Bit, UInt8, UInt16, UInt32, Bytes,
PaddingBits, PaddingBytes, NullBits, NullBytes)
from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
class NE_Header(FieldSet):
static_size = 64*8
def createFields(self):
yield Bytes(self, "signature", 2, "New executable signature (NE)")
yield UInt8(self, "link_ver", "Linker version number")
yield UInt8(self, "link_rev", "Linker revision number")
yield UInt16(self, "entry_table_ofst", "Offset to the entry table")
yield UInt16(self, "entry_table_size", "Length (in bytes) of the entry table")
yield PaddingBytes(self, "reserved[]", 4)
yield Bit(self, "is_dll", "Is a dynamic-link library (DLL)?")
yield Bit(self, "is_win_app", "Is a Windows application?")
yield PaddingBits(self, "reserved[]", 9)
yield Bit(self, "first_seg_code", "First segment contains code that loads the application?")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "link_error", "Load even if linker detects errors?")
yield NullBits(self, "reserved[]", 1)
yield Bit(self, "is_lib", "Is a library module?")
yield UInt16(self, "auto_data_seg", "Automatic data segment number")
yield filesizeHandler(UInt16(self, "local_heap_size", "Initial size (in bytes) of the local heap"))
yield filesizeHandler(UInt16(self, "stack_size", "Initial size (in bytes) of the stack"))
yield textHandler(UInt32(self, "cs_ip", "Value of CS:IP"), hexadecimal)
yield textHandler(UInt32(self, "ss_sp", "Value of SS:SP"), hexadecimal)
yield UInt16(self, "nb_entry_seg_tab", "Number of entries in the segment table")
yield UInt16(self, "nb_entry_modref_tab", "Number of entries in the module-reference table")
yield filesizeHandler(UInt16(self, "size_nonres_name_tab", "Number of bytes in the nonresident-name table"))
yield UInt16(self, "seg_tab_ofs", "Segment table offset")
yield UInt16(self, "rsrc_ofs", "Resource offset")
yield UInt16(self, "res_name_tab_ofs", "Resident-name table offset")
yield UInt16(self, "mod_ref_tab_ofs", "Module-reference table offset")
yield UInt16(self, "import_tab_ofs", "Imported-name table offset")
yield UInt32(self, "non_res_name_tab_ofs", "Nonresident-name table offset")
yield UInt16(self, "nb_mov_ent_pt", "Number of movable entry points")
yield UInt16(self, "log2_sector_size", "Log2 of the segment sector size")
yield UInt16(self, "nb_rsrc_seg", "Number of resource segments")
yield Bit(self, "unknown_os_format", "Operating system format is unknown")
yield PaddingBits(self, "reserved[]", 1)
yield Bit(self, "os_windows", "Operating system is Microsoft Windows")
yield NullBits(self, "reserved[]", 6)
yield Bit(self, "is_win20_prot", "Is Windows 2.x application running in version 3.x protected mode")
yield Bit(self, "is_win20_font", "Is Windows 2.x application supporting proportional fonts")
yield Bit(self, "fast_load", "Contains a fast-load area?")
yield NullBits(self, "reserved[]", 4)
yield UInt16(self, "fastload_ofs", "Fast-load area offset (in sector)")
yield UInt16(self, "fastload_size", "Fast-load area length (in sector)")
yield NullBytes(self, "reserved[]", 2)
yield textHandler(UInt16(self, "win_version", "Expected Windows version number"), hexadecimal)
| gpl-3.0 |
openatv/enigma2 | lib/python/Screens/PictureInPicture.py | 10 | 7903 | from Screens.Screen import Screen
from Screens.Dish import Dishpip
from enigma import ePoint, eSize, eRect, eServiceCenter, getBestPlayableServiceReference, eServiceReference, eTimer
from Components.SystemInfo import SystemInfo
from Components.VideoWindow import VideoWindow
from Components.config import config, ConfigPosition, ConfigYesNo, ConfigSelection
from Tools import Notifications
from Screens.MessageBox import MessageBox
from os import access, W_OK
MAX_X = 720
MAX_Y = 576
pip_config_initialized = False
PipPigModeEnabled = False
PipPigModeTimer = eTimer()
def timedStopPipPigMode():
from Screens.InfoBar import InfoBar
if InfoBar.instance and InfoBar.instance.session:
if SystemInfo["hasPIPVisibleProc"]:
open(SystemInfo["hasPIPVisibleProc"], "w").write("1")
elif hasattr(InfoBar.instance.session, "pip"):
InfoBar.instance.session.pip.playService(InfoBar.instance.session.pip.currentService)
global PipPigModeEnabled
PipPigModeEnabled = False
PipPigModeTimer.callback.append(timedStopPipPigMode)
def PipPigMode(value):
from Screens.InfoBar import InfoBar
if InfoBar.instance and InfoBar.instance.session and hasattr(InfoBar.instance.session, "pip") and config.av.pip_mode.value != "external":
if value:
PipPigModeTimer.stop()
global PipPigModeEnabled
if not PipPigModeEnabled:
if SystemInfo["hasPIPVisibleProc"]:
open(SystemInfo["hasPIPVisibleProc"], "w").write("0")
else:
InfoBar.instance.session.pip.pipservice = False
PipPigModeEnabled = True
else:
PipPigModeTimer.start(100, True)
class PictureInPictureZapping(Screen):
skin = """<screen name="PictureInPictureZapping" flags="wfNoBorder" position="50,50" size="90,26" title="PiPZap" zPosition="-1">
<eLabel text="PiP-Zap" position="0,0" size="90,26" foregroundColor="#00ff66" font="Regular;26" />
</screen>"""
class PictureInPicture(Screen):
def __init__(self, session):
global pip_config_initialized
Screen.__init__(self, session)
self["video"] = VideoWindow()
self.pipActive = session.instantiateDialog(PictureInPictureZapping)
self.dishpipActive = session.instantiateDialog(Dishpip)
self.currentService = None
self.currentServiceReference = None
self.choicelist = [("standard", _("Standard"))]
if SystemInfo["VideoDestinationConfigurable"]:
self.choicelist.append(("cascade", _("Cascade PiP")))
self.choicelist.append(("split", _("Splitscreen")))
self.choicelist.append(("byside", _("Side by side")))
self.choicelist.append(("bigpig", _("Big PiP")))
if SystemInfo["HasExternalPIP"]:
self.choicelist.append(("external", _("External PiP")))
if not pip_config_initialized:
config.av.pip = ConfigPosition(default=[510, 28, 180, 135], args = (MAX_X, MAX_Y, MAX_X, MAX_Y))
config.av.pip_mode = ConfigSelection(default="standard", choices=self.choicelist)
pip_config_initialized = True
self.onLayoutFinish.append(self.LayoutFinished)
def __del__(self):
del self.pipservice
self.setExternalPiP(False)
self.setSizePosMainWindow()
if hasattr(self, "dishpipActive") and self.dishpipActive is not None:
self.dishpipActive.setHide()
def relocate(self):
x = config.av.pip.value[0]
y = config.av.pip.value[1]
w = config.av.pip.value[2]
h = config.av.pip.value[3]
self.move(x, y)
self.resize(w, h)
def LayoutFinished(self):
self.onLayoutFinish.remove(self.LayoutFinished)
self.relocate()
self.setExternalPiP(config.av.pip_mode.value == "external")
def move(self, x, y):
config.av.pip.value[0] = x
config.av.pip.value[1] = y
w = config.av.pip.value[2]
h = config.av.pip.value[3]
if config.av.pip_mode.value == "cascade":
x = MAX_X - w
y = 0
elif config.av.pip_mode.value == "split":
x = MAX_X / 2
y = 0
elif config.av.pip_mode.value == "byside":
x = MAX_X / 2
y = MAX_Y / 4
elif config.av.pip_mode.value in "bigpig external":
x = 0
y = 0
config.av.pip.save()
self.instance.move(ePoint(x, y))
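	# Added worked example (illustrative only): with the default PiP size of
	# 180x135 and MAX_X=720, "cascade" pins the window to the top-right corner
	# (x = 720 - 180 = 540, y = 0), while "split" uses the right half of the
	# screen (x = 720 / 2 = 360, y = 0).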
def resize(self, w, h):
config.av.pip.value[2] = w
config.av.pip.value[3] = h
config.av.pip.save()
if config.av.pip_mode.value == "standard":
self.instance.resize(eSize(*(w, h)))
self["video"].instance.resize(eSize(*(w, h)))
self.setSizePosMainWindow()
elif config.av.pip_mode.value == "cascade":
self.instance.resize(eSize(*(w, h)))
self["video"].instance.resize(eSize(*(w, h)))
self.setSizePosMainWindow(0, h, MAX_X - w, MAX_Y - h)
elif config.av.pip_mode.value == "split":
self.instance.resize(eSize(*(MAX_X/2, MAX_Y )))
self["video"].instance.resize(eSize(*(MAX_X/2, MAX_Y)))
self.setSizePosMainWindow(0, 0, MAX_X/2, MAX_Y)
elif config.av.pip_mode.value == "byside":
self.instance.resize(eSize(*(MAX_X/2, MAX_Y/2 )))
self["video"].instance.resize(eSize(*(MAX_X/2, MAX_Y/2)))
self.setSizePosMainWindow(0, MAX_Y/4, MAX_X/2, MAX_Y/2)
elif config.av.pip_mode.value in "bigpig external":
self.instance.resize(eSize(*(MAX_X, MAX_Y)))
self["video"].instance.resize(eSize(*(MAX_X, MAX_Y)))
self.setSizePosMainWindow()
def setSizePosMainWindow(self, x = 0, y = 0, w = 0, h = 0):
if SystemInfo["VideoDestinationConfigurable"]:
self["video"].instance.setFullScreenPosition(eRect(x, y, w, h))
def setExternalPiP(self, onoff):
if SystemInfo["HasExternalPIP"]:
open(SystemInfo["HasExternalPIP"], "w").write(onoff and "on" or "off")
def active(self):
self.pipActive.show()
def inactive(self):
self.pipActive.hide()
def getPosition(self):
return self.instance.position().x(), self.instance.position().y()
def getSize(self):
return self.instance.size().width(), self.instance.size().height()
def togglePiPMode(self):
self.setMode(config.av.pip_mode.choices[(config.av.pip_mode.index + 1) % len(config.av.pip_mode.choices)])
def setMode(self, mode):
config.av.pip_mode.value = mode
config.av.pip_mode.save()
self.setExternalPiP(config.av.pip_mode.value == "external")
self.relocate()
def getMode(self):
return config.av.pip_mode.value
def getModeName(self):
return self.choicelist[config.av.pip_mode.index][1]
def playService(self, service):
if service is None:
return False
ref = self.resolveAlternatePipService(service)
if ref:
if self.isPlayableForPipService(ref):
print "playing pip service", ref and ref.toString()
else:
if not config.usage.hide_zap_errors.value:
Notifications.AddPopup(text = _("No free tuner!"), type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapPipError")
return False
self.pipservice = eServiceCenter.getInstance().play(ref)
if self.pipservice and not self.pipservice.setTarget(1, True):
if hasattr(self, "dishpipActive") and self.dishpipActive is not None:
self.dishpipActive.startPiPService(ref)
self.pipservice.start()
self.currentService = service
self.currentServiceReference = ref
return True
else:
self.pipservice = None
self.currentService = None
self.currentServiceReference = None
if not config.usage.hide_zap_errors.value:
Notifications.AddPopup(text = _("Incorrect type service for PiP!"), type = MessageBox.TYPE_ERROR, timeout = 5, id = "ZapPipError")
return False
def getCurrentService(self):
return self.currentService
def getCurrentServiceReference(self):
return self.currentServiceReference
def isPlayableForPipService(self, service):
playingref = self.session.nav.getCurrentlyPlayingServiceReference()
if playingref is None or service == playingref:
return True
info = eServiceCenter.getInstance().info(service)
oldref = self.currentServiceReference or eServiceReference()
if info and info.isPlayable(service, oldref):
return True
return False
def resolveAlternatePipService(self, service):
if service and (service.flags & eServiceReference.isGroup):
oldref = self.currentServiceReference or eServiceReference()
return getBestPlayableServiceReference(service, oldref)
return service
| gpl-2.0 |
gunan/tensorflow | tensorflow/python/data/experimental/benchmarks/map_and_batch_benchmark.py | 9 | 8438 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import itertools
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
_NUMPY_RANDOM_SEED = 42
class MapAndBatchBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
def benchmark_map_and_batch(self):
"""Measures the performance of parallelized batching."""
shapes = [(), (10,), (10, 10), (10, 10, 10), (224, 224, 3)]
batch_size_values = [1, 32, 64, 128, 1024]
shape_placeholder = array_ops.placeholder(dtypes.int64, shape=[None])
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset_ops.Dataset.range(1000000000)
dense_value = random_ops.random_normal(shape=shape_placeholder)
dataset = dataset.apply(batching.map_and_batch(
lambda _: dense_value, batch_size_placeholder))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
for shape in shapes:
for batch_size in batch_size_values:
with session.Session() as sess:
sess.run(iterator.initializer, feed_dict={
shape_placeholder: shape, batch_size_placeholder: batch_size})
# Use a C++ callable to minimize the Python overhead in the benchmark.
callable_opts = config_pb2.CallableOptions()
callable_opts.target.append(next_element.op.name)
op_callable = sess._make_callable_from_options(callable_opts) # pylint: disable=protected-access
# Run five steps to warm up the session caches before taking the
# first measurement.
for _ in range(5):
op_callable()
deltas = []
overall_start = time.time()
# Run at least five repetitions and for at least five seconds.
while len(deltas) < 5 or time.time() - overall_start < 5.0:
start = time.time()
for _ in range(100):
op_callable()
end = time.time()
deltas.append(end - start)
del op_callable
median_wall_time = np.median(deltas) / 100.0
iters = len(deltas) * 100
self.report_benchmark(
iters=iters, wall_time=median_wall_time,
name="num_elements_%d_batch_size_%d" % (np.prod(shape), batch_size))
def benchmark_map_and_batch_chaining_versus_fusing(self):
"""Compares the performance of chaining and fusing map and batch.
NOTE: It is recommended to build the benchmark with
`-c opt --copt=-mavx --copt=-mavx2 --copt=-mfma --copt=-gmlt`
and execute it on a machine with at least 32 CPU cores.
"""
# Sequential pipeline configurations.
seq_elem_size_series = itertools.product([1], [1], [1, 2, 4, 8], [16])
seq_batch_size_series = itertools.product([1], [1], [1], [8, 16, 32, 64])
# Parallel pipeline configuration.
par_elem_size_series = itertools.product([32], [32], [1, 2, 4, 8], [256])
par_batch_size_series = itertools.product([32], [32], [1],
[128, 256, 512, 1024])
par_num_calls_series = itertools.product([8, 16, 32, 64], [32], [1], [512])
par_inter_op_series = itertools.product([32], [8, 16, 32, 64], [1], [512])
def name(method, label, num_calls, inter_op, element_size, batch_size):
return ("%s_id_%s_num_calls_%d_inter_op_%d_elem_size_%d_batch_size_%d" % (
method,
hashlib.sha1((label).encode("utf-8")).hexdigest()[:8],
num_calls,
inter_op,
element_size,
batch_size,
))
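    # Added note (illustrative): the helper above yields names such as
    # "chained_id_<8 hex chars>_num_calls_8_inter_op_32_elem_size_1_batch_size_512",
    # where the 8-character id is the truncated SHA-1 of the series label, so
    # each (method, configuration) pair reports under a distinct benchmark name.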
def benchmark(label, series):
"""Runs benchmark the given series."""
def make_dataset(element_size, num_calls, batch_size): # pylint: disable=missing-docstring
k = 1024 * 1024
x = constant_op.constant(np.random.rand(element_size, 4 * k))
y = constant_op.constant(np.random.rand(4 * k, 1))
dataset = dataset_ops.Dataset.range(1000000000000).map(lambda _: (x, y))
dataset = dataset.map(
math_ops.matmul,
num_parallel_calls=num_calls).batch(batch_size=batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
return dataset.with_options(options)
for num_calls, inter_op, element_size, batch_size in series:
num_iters = 1024 // (
(element_size * batch_size) // min(num_calls, inter_op))
# By default the chained map().batch() calls will not be fused.
chained_dataset = make_dataset(element_size, num_calls, batch_size)
chained_iterator = dataset_ops.make_one_shot_iterator(chained_dataset)
chained_get_next = chained_iterator.get_next()
chained_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(chained_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(chained_get_next.op)
end = time.time()
chained_deltas.append(end - start)
self.report_benchmark(
iters=num_iters,
wall_time=np.median(chained_deltas),
name=name("chained", label, num_calls, inter_op, element_size,
batch_size))
# Apply an option to the default dataset that will fuse map().batch().
options = dataset_ops.Options()
options.experimental_optimization.map_and_batch_fusion = True
fused_dataset = chained_dataset.with_options(options)
fused_iterator = dataset_ops.make_one_shot_iterator(fused_dataset)
fused_get_next = fused_iterator.get_next()
fused_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(fused_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(fused_get_next.op)
end = time.time()
fused_deltas.append(end - start)
self.report_benchmark(
iters=num_iters,
wall_time=np.median(fused_deltas),
name=name("fused", label, num_calls, inter_op, element_size,
batch_size))
print()
np.random.seed(_NUMPY_RANDOM_SEED)
benchmark("Sequential element size evaluation", seq_elem_size_series)
benchmark("Sequential batch size evaluation", seq_batch_size_series)
benchmark("Parallel element size evaluation", par_elem_size_series)
benchmark("Parallel batch size evaluation", par_batch_size_series)
benchmark("Transformation parallelism evaluation", par_num_calls_series)
benchmark("Threadpool size evaluation", par_inter_op_series)
if __name__ == "__main__":
test.main()
| apache-2.0 |
xrmx/django | tests/custom_columns/tests.py | 207 | 4090 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import Article, Author
class CustomColumnsTests(TestCase):
def setUp(self):
self.a1 = Author.objects.create(first_name="John", last_name="Smith")
self.a2 = Author.objects.create(first_name="Peter", last_name="Jones")
self.authors = [self.a1, self.a2]
self.article = Article.objects.create(headline="Django lets you build Web apps easily", primary_author=self.a1)
self.article.authors = self.authors
def test_query_all_available_authors(self):
self.assertQuerysetEqual(
Author.objects.all(), [
"Peter Jones", "John Smith",
],
six.text_type
)
def test_get_first_name(self):
self.assertEqual(
Author.objects.get(first_name__exact="John"),
self.a1,
)
def test_filter_first_name(self):
self.assertQuerysetEqual(
Author.objects.filter(first_name__exact="John"), [
"John Smith",
],
six.text_type
)
def test_field_error(self):
self.assertRaises(
FieldError,
lambda: Author.objects.filter(firstname__exact="John")
)
def test_attribute_error(self):
with self.assertRaises(AttributeError):
self.a1.firstname
with self.assertRaises(AttributeError):
self.a1.last
def test_get_all_authors_for_an_article(self):
self.assertQuerysetEqual(
self.article.authors.all(), [
"Peter Jones",
"John Smith",
],
six.text_type
)
def test_get_all_articles_for_an_author(self):
self.assertQuerysetEqual(
self.a1.article_set.all(), [
"Django lets you build Web apps easily",
],
lambda a: a.headline
)
def test_get_author_m2m_relation(self):
self.assertQuerysetEqual(
self.article.authors.filter(last_name='Jones'), [
"Peter Jones"
],
six.text_type
)
def test_author_querying(self):
self.assertQuerysetEqual(
Author.objects.all().order_by('last_name'),
['<Author: Peter Jones>', '<Author: John Smith>']
)
def test_author_filtering(self):
self.assertQuerysetEqual(
Author.objects.filter(first_name__exact='John'),
['<Author: John Smith>']
)
def test_author_get(self):
self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))
def test_filter_on_nonexistent_field(self):
self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'firstname' into field. Choices are: Author_ID, article, first_name, last_name, primary_set",
Author.objects.filter,
firstname__exact='John'
)
def test_author_get_attributes(self):
a = Author.objects.get(last_name__exact='Smith')
self.assertEqual('John', a.first_name)
self.assertEqual('Smith', a.last_name)
self.assertRaisesMessage(
AttributeError,
"'Author' object has no attribute 'firstname'",
getattr,
a, 'firstname'
)
self.assertRaisesMessage(
AttributeError,
"'Author' object has no attribute 'last'",
getattr,
a, 'last'
)
def test_m2m_table(self):
self.assertQuerysetEqual(
self.article.authors.all().order_by('last_name'),
['<Author: Peter Jones>', '<Author: John Smith>']
)
self.assertQuerysetEqual(
self.a1.article_set.all(),
['<Article: Django lets you build Web apps easily>']
)
self.assertQuerysetEqual(
self.article.authors.filter(last_name='Jones'),
['<Author: Peter Jones>']
)
| bsd-3-clause |
JesseLivezey/sklearn-theano | sklearn_theano/externals/google/protobuf/service_reflection.py | 243 | 11023 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains metaclasses used to create protocol service and service stub
classes from ServiceDescriptor objects at runtime.
The GeneratedServiceType and GeneratedServiceStubType metaclasses are used to
inject all useful functionality into the classes output by the protocol
compiler at compile-time.
"""
__author__ = '[email protected] (Petar Petrov)'
class GeneratedServiceType(type):
"""Metaclass for service classes created at runtime from ServiceDescriptors.
Implementations for all methods described in the Service class are added here
by this class. We also create properties to allow getting/setting all fields
in the protocol message.
The protocol compiler currently uses this metaclass to create protocol service
classes at runtime. Clients can also manually create their own classes at
runtime, as in this example:
mydescriptor = ServiceDescriptor(.....)
class MyProtoService(service.Service):
__metaclass__ = GeneratedServiceType
DESCRIPTOR = mydescriptor
myservice_instance = MyProtoService()
...
"""
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __init__(cls, name, bases, dictionary):
"""Creates a message service class.
Args:
name: Name of the class (ignored, but required by the metaclass
protocol).
bases: Base classes of the class being constructed.
dictionary: The class dictionary of the class being constructed.
dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
describing this protocol service type.
"""
# Don't do anything if this class doesn't have a descriptor. This happens
# when a service class is subclassed.
if GeneratedServiceType._DESCRIPTOR_KEY not in dictionary:
return
descriptor = dictionary[GeneratedServiceType._DESCRIPTOR_KEY]
service_builder = _ServiceBuilder(descriptor)
service_builder.BuildService(cls)
class GeneratedServiceStubType(GeneratedServiceType):
"""Metaclass for service stubs created at runtime from ServiceDescriptors.
This class has similar responsibilities as GeneratedServiceType, except that
it creates the service stub classes.
"""
_DESCRIPTOR_KEY = 'DESCRIPTOR'
def __init__(cls, name, bases, dictionary):
"""Creates a message service stub class.
Args:
      name: Name of the class (ignored here).
bases: Base classes of the class being constructed.
dictionary: The class dictionary of the class being constructed.
dictionary[_DESCRIPTOR_KEY] must contain a ServiceDescriptor object
describing this protocol service type.
"""
super(GeneratedServiceStubType, cls).__init__(name, bases, dictionary)
# Don't do anything if this class doesn't have a descriptor. This happens
# when a service stub is subclassed.
if GeneratedServiceStubType._DESCRIPTOR_KEY not in dictionary:
return
descriptor = dictionary[GeneratedServiceStubType._DESCRIPTOR_KEY]
service_stub_builder = _ServiceStubBuilder(descriptor)
service_stub_builder.BuildServiceStub(cls)
class _ServiceBuilder(object):
"""This class constructs a protocol service class using a service descriptor.
  Given a service descriptor, this class constructs a class that implements
  the described service. One service builder instance constructs
exactly one service class. That means all instances of that class share the
same builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
service class.
"""
self.descriptor = service_descriptor
def BuildService(self, cls):
"""Constructs the service class.
Args:
cls: The class that will be constructed.
"""
# CallMethod needs to operate with an instance of the Service class. This
# internal wrapper function exists only to be able to pass the service
# instance to the method that does the real CallMethod work.
def _WrapCallMethod(srvc, method_descriptor,
rpc_controller, request, callback):
return self._CallMethod(srvc, method_descriptor,
rpc_controller, request, callback)
self.cls = cls
cls.CallMethod = _WrapCallMethod
cls.GetDescriptor = staticmethod(lambda: self.descriptor)
cls.GetDescriptor.__doc__ = "Returns the service descriptor."
cls.GetRequestClass = self._GetRequestClass
cls.GetResponseClass = self._GetResponseClass
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateNonImplementedMethod(method))
def _CallMethod(self, srvc, method_descriptor,
rpc_controller, request, callback):
"""Calls the method described by a given method descriptor.
Args:
srvc: Instance of the service for which this method is called.
method_descriptor: Descriptor that represent the method to call.
rpc_controller: RPC controller to use for this method's execution.
request: Request protocol message.
callback: A callback to invoke after the method has completed.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'CallMethod() given method descriptor for wrong service type.')
method = getattr(srvc, method_descriptor.name)
return method(rpc_controller, request, callback)
def _GetRequestClass(self, method_descriptor):
"""Returns the class of the request protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
request protocol message class.
Returns:
A class that represents the input protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetRequestClass() given method descriptor for wrong service type.')
return method_descriptor.input_type._concrete_class
def _GetResponseClass(self, method_descriptor):
"""Returns the class of the response protocol message.
Args:
method_descriptor: Descriptor of the method for which to return the
response protocol message class.
Returns:
A class that represents the output protocol message of the specified
method.
"""
if method_descriptor.containing_service != self.descriptor:
raise RuntimeError(
'GetResponseClass() given method descriptor for wrong service type.')
return method_descriptor.output_type._concrete_class
def _GenerateNonImplementedMethod(self, method):
"""Generates and returns a method that can be set for a service methods.
Args:
method: Descriptor of the service method for which a method is to be
generated.
Returns:
A method that can be added to the service class.
"""
return lambda inst, rpc_controller, request, callback: (
self._NonImplementedMethod(method.name, rpc_controller, callback))
def _NonImplementedMethod(self, method_name, rpc_controller, callback):
"""The body of all methods in the generated service class.
Args:
method_name: Name of the method being executed.
rpc_controller: RPC controller used to execute this method.
callback: A callback which will be invoked when the method finishes.
"""
rpc_controller.SetFailed('Method %s not implemented.' % method_name)
callback(None)
class _ServiceStubBuilder(object):
"""Constructs a protocol service stub class using a service descriptor.
Given a service descriptor, this class constructs a suitable stub class.
A stub is just a type-safe wrapper around an RpcChannel which emulates a
local implementation of the service.
One service stub builder instance constructs exactly one class. It means all
instances of that class share the same service stub builder.
"""
def __init__(self, service_descriptor):
"""Initializes an instance of the service stub class builder.
Args:
service_descriptor: ServiceDescriptor to use when constructing the
stub class.
"""
self.descriptor = service_descriptor
def BuildServiceStub(self, cls):
"""Constructs the stub class.
Args:
cls: The class that will be constructed.
"""
def _ServiceStubInit(stub, rpc_channel):
stub.rpc_channel = rpc_channel
self.cls = cls
cls.__init__ = _ServiceStubInit
for method in self.descriptor.methods:
setattr(cls, method.name, self._GenerateStubMethod(method))
def _GenerateStubMethod(self, method):
return (lambda inst, rpc_controller, request, callback=None:
self._StubMethod(inst, method, rpc_controller, request, callback))
def _StubMethod(self, stub, method_descriptor,
rpc_controller, request, callback):
"""The body of all service methods in the generated stub class.
Args:
stub: Stub instance.
method_descriptor: Descriptor of the invoked method.
rpc_controller: Rpc controller to execute the method.
request: Request protocol message.
callback: A callback to execute when the method finishes.
Returns:
Response message (in case of blocking call).
"""
return stub.rpc_channel.CallMethod(
method_descriptor, rpc_controller, request,
method_descriptor.output_type._concrete_class, callback)
| bsd-3-clause |
jledbetter/openhatch | mysite/search/migrations/0018_bug_bite_size_tag_name.py | 17 | 3743 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding field 'Bug.bize_size_tag_name'
db.add_column('search_bug', 'bize_size_tag_name', orm['search.bug:bize_size_tag_name'])
def backwards(self, orm):
# Deleting field 'Bug.bize_size_tag_name'
db.delete_column('search_bug', 'bize_size_tag_name')
models = {
'search.bug': {
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.project': {
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
}
}
complete_apps = ['search']
| agpl-3.0 |
autrilla/servo | tests/wpt/web-platform-tests/tools/manifest/sourcefile.py | 24 | 10645 | import os
import urlparse
from fnmatch import fnmatch
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
import html5lib
import vcs
from item import Stub, ManualTest, WebdriverSpecTest, RefTest, TestharnessTest
from utils import rel_path_to_url, is_blacklisted, ContextManagerStringIO, cached_property
wd_pattern = "*.py"
class SourceFile(object):
parsers = {"html":lambda x:html5lib.parse(x, treebuilder="etree"),
"xhtml":ElementTree.parse,
"svg":ElementTree.parse}
def __init__(self, tests_root, rel_path, url_base, use_committed=False):
"""Object representing a file in a source tree.
:param tests_root: Path to the root of the source tree
:param rel_path: File path relative to tests_root
:param url_base: Base URL used when converting file paths to urls
:param use_committed: Work with the last committed version of the file
rather than the on-disk version.
"""
self.tests_root = tests_root
self.rel_path = rel_path
self.url_base = url_base
self.use_committed = use_committed
self.url = rel_path_to_url(rel_path, url_base)
self.path = os.path.join(tests_root, rel_path)
self.dir_path, self.filename = os.path.split(self.path)
self.name, self.ext = os.path.splitext(self.filename)
self.type_flag = None
if "-" in self.name:
self.type_flag = self.name.rsplit("-", 1)[1]
self.meta_flags = self.name.split(".")[1:]
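    # Added illustrative note (not part of upstream wpt): for a file named
    # "foo-manual.html" this yields name="foo-manual", ext=".html",
    # type_flag="manual" and meta_flags=[]; for "foo.worker.js" it yields
    # name="foo.worker", ext=".js", type_flag=None and meta_flags=["worker"],
    # which is what name_is_manual / name_is_worker below key off.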
def __getstate__(self):
# Remove computed properties if we pickle this class
rv = self.__dict__.copy()
if "__cached_properties__" in rv:
cached_properties = rv["__cached_properties__"]
for key in rv.keys():
if key in cached_properties:
del rv[key]
del rv["__cached_properties__"]
return rv
def name_prefix(self, prefix):
"""Check if the filename starts with a given prefix
:param prefix: The prefix to check"""
return self.name.startswith(prefix)
def open(self):
"""Return a File object opened for reading the file contents,
or the contents of the file when last committed, if
        use_committed is true."""
if self.use_committed:
git = vcs.get_git_func(os.path.dirname(__file__))
blob = git("show", "HEAD:%s" % self.rel_path)
file_obj = ContextManagerStringIO(blob)
else:
file_obj = open(self.path)
return file_obj
@property
def name_is_non_test(self):
"""Check if the file name matches the conditions for the file to
be a non-test file"""
return (os.path.isdir(self.rel_path) or
self.name_prefix("MANIFEST") or
self.filename.startswith(".") or
is_blacklisted(self.url))
@property
def name_is_stub(self):
"""Check if the file name matches the conditions for the file to
be a stub file"""
return self.name_prefix("stub-")
@property
def name_is_manual(self):
"""Check if the file name matches the conditions for the file to
be a manual test file"""
return self.type_flag == "manual"
@property
def name_is_worker(self):
"""Check if the file name matches the conditions for the file to
be a worker js test file"""
return "worker" in self.meta_flags and self.ext == ".js"
@property
def name_is_webdriver(self):
"""Check if the file name matches the conditions for the file to
be a webdriver spec test file"""
# wdspec tests are in subdirectories of /webdriver excluding __init__.py
# files.
rel_dir_tree = self.rel_path.split(os.path.sep)
return (rel_dir_tree[0] == "webdriver" and
len(rel_dir_tree) > 1 and
self.filename != "__init__.py" and
fnmatch(self.filename, wd_pattern))
@property
def name_is_reference(self):
"""Check if the file name matches the conditions for the file to
be a reference file (not a reftest)"""
return self.type_flag in ("ref", "notref")
@property
def markup_type(self):
"""Return the type of markup contained in a file, based on its extension,
or None if it doesn't contain markup"""
ext = self.ext
if not ext:
return None
if ext[0] == ".":
ext = ext[1:]
if ext in ["html", "htm"]:
return "html"
if ext in ["xhtml", "xht", "xml"]:
return "xhtml"
if ext == "svg":
return "svg"
return None
@cached_property
def root(self):
"""Return an ElementTree Element for the root node of the file if it contains
markup, or None if it does not"""
if not self.markup_type:
return None
parser = self.parsers[self.markup_type]
with self.open() as f:
try:
tree = parser(f)
except Exception:
return None
if hasattr(tree, "getroot"):
root = tree.getroot()
else:
root = tree
return root
@cached_property
def timeout_nodes(self):
"""List of ElementTree Elements corresponding to nodes in a test that
specify timeouts"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='timeout']")
@cached_property
def timeout(self):
"""The timeout of a test or reference file. "long" if the file has an extended timeout
or None otherwise"""
if not self.root:
return
if self.timeout_nodes:
timeout_str = self.timeout_nodes[0].attrib.get("content", None)
if timeout_str and timeout_str.lower() == "long":
return timeout_str
@cached_property
def viewport_nodes(self):
"""List of ElementTree Elements corresponding to nodes in a test that
specify viewport sizes"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='viewport-size']")
@cached_property
def viewport_size(self):
"""The viewport size of a test or reference file"""
if not self.root:
return None
if not self.viewport_nodes:
return None
return self.viewport_nodes[0].attrib.get("content", None)
@cached_property
def dpi_nodes(self):
"""List of ElementTree Elements corresponding to nodes in a test that
specify device pixel ratios"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='device-pixel-ratio']")
@cached_property
def dpi(self):
"""The device pixel ratio of a test or reference file"""
if not self.root:
return None
if not self.dpi_nodes:
return None
return self.dpi_nodes[0].attrib.get("content", None)
@cached_property
def testharness_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
testharness.js script"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharness.js']")
@cached_property
def content_is_testharness(self):
"""Boolean indicating whether the file content represents a
testharness.js test"""
if not self.root:
return None
return bool(self.testharness_nodes)
@cached_property
def variant_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
test variant"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='variant']")
@cached_property
def test_variants(self):
rv = []
for element in self.variant_nodes:
if "content" in element.attrib:
variant = element.attrib["content"]
assert variant == "" or variant[0] in ["#", "?"]
rv.append(variant)
if not rv:
rv = [""]
return rv
@cached_property
def reftest_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
to a reftest <link>"""
if not self.root:
return []
match_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='match']")
mismatch_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='mismatch']")
return match_links + mismatch_links
@cached_property
def references(self):
"""List of (ref_url, relation) tuples for any reftest references specified in
the file"""
rv = []
rel_map = {"match": "==", "mismatch": "!="}
for item in self.reftest_nodes:
if "href" in item.attrib:
ref_url = urlparse.urljoin(self.url, item.attrib["href"])
ref_type = rel_map[item.attrib["rel"]]
rv.append((ref_url, ref_type))
return rv
@cached_property
def content_is_ref_node(self):
"""Boolean indicating whether the file is a non-leaf node in a reftest
graph (i.e. if it contains any <link rel=[mis]match>"""
return bool(self.references)
def manifest_items(self):
"""List of manifest items corresponding to the file. There is typically one
per test, but in the case of reftests a node may have corresponding manifest
items without being a test itself."""
if self.name_is_non_test:
rv = []
elif self.name_is_stub:
rv = [Stub(self, self.url)]
elif self.name_is_manual:
rv = [ManualTest(self, self.url)]
elif self.name_is_worker:
rv = [TestharnessTest(self, self.url[:-3])]
elif self.name_is_webdriver:
rv = [WebdriverSpecTest(self, self.url)]
elif self.content_is_testharness:
rv = []
for variant in self.test_variants:
url = self.url + variant
rv.append(TestharnessTest(self, url, timeout=self.timeout))
elif self.content_is_ref_node:
rv = [RefTest(self, self.url, self.references, timeout=self.timeout,
viewport_size=self.viewport_size, dpi=self.dpi)]
else:
# If nothing else it's a helper file, which we don't have a specific type for
rv = []
return rv
| mpl-2.0 |
Axiologue/AxiologueAPI | forum/models.py | 1 | 1829 | from django.db import models
from django.utils import timezone
from django.conf import settings
class Category(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
list_order = models.PositiveSmallIntegerField(blank=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'categories'
ordering = ('list_order', )
# Save function to handle assignable ordering
def save(self, *args, **kwargs):
model = self.__class__
#Automatic ordering of list_order property
if self.list_order is None:
try:
last = model.objects.order_by('-list_order')[0]
self.list_order = last.list_order + 1
except IndexError:
self.list_order = 0
return super(Category, self).save(*args, **kwargs)
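    # Added note (illustrative): with this save() override, the first Category
    # saved without an explicit list_order gets 0, and each subsequent one gets
    # the current maximum list_order + 1, e.g.:
    #   Category.objects.create(name="General", description="...")   # list_order 0
    #   Category.objects.create(name="Feedback", description="...")  # list_order 1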
class Thread(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL)
subject = models.CharField(max_length=200)
created_date = models.DateTimeField(default=timezone.now)
category = models.ForeignKey(Category, related_name="threads")
def __str__(self):
return self.subject
class Meta:
ordering = ('-created_date', )
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='posts')
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
last_edited_date = models.DateTimeField(auto_now=True)
thread = models.ForeignKey(Thread, related_name="posts")
sticky = models.BooleanField(default=False)
def __str__(self):
return self.text[:100]
class Meta:
ordering = ('-sticky', 'created_date', )
| mit |
jf---/pythonocc-core | test/core_extend_shapefactory_unittest.py | 2 | 3720 | #!/usr/bin/env python
##Copyright 2020 Thomas Paviot ([email protected])
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
import math
import unittest
from OCC.Core.BRepPrimAPI import (BRepPrimAPI_MakeBox, BRepPrimAPI_MakeSphere,
BRepPrimAPI_MakeTorus)
from OCC.Core.gp import gp_Pnt, gp_Vec
from OCC.Extend.ShapeFactory import (midpoint, scale_shape, measure_shape_volume,
translate_shp, measure_shape_mass_center_of_gravity,
edge_to_bezier)
from OCC.Extend.TopologyUtils import TopologyExplorer
class TestExtendShapeFactory(unittest.TestCase):
def test_midpoint(self):
p1 = gp_Pnt(0, 0, 0)
p2 = gp_Pnt(4, 5, 6)
p3 = midpoint(p1, p2)
self.assertEqual([p3.X(), p3.Y(), p3.Z()], [2, 2.5, 3.])
def test_measure_shape_volume(self):
        # first the volume of a box a,b,c should be a*b*c
a = 10.
b = 23.
c = 98.1
box = BRepPrimAPI_MakeBox(a, b, c).Shape()
box_volume = measure_shape_volume(box)
self.assertAlmostEqual(box_volume, a * b * c, places=6)
# for a sphere of radius r, it should be 4/3.pi.r^3
r = 9.8775 # a random radius
sph = BRepPrimAPI_MakeSphere(r).Shape()
sph_volume = measure_shape_volume(sph)
self.assertAlmostEqual(sph_volume, 4./3. * math.pi * r ** 3, places=6)
def test_scale_shape(self):
box = BRepPrimAPI_MakeBox(10., 10., 10.).Shape()
box2 = scale_shape(box, 2.0, 1.0, 1.0)
# volume should be double
box2_volume = measure_shape_volume(box2)
self.assertAlmostEqual(box2_volume, 2000., places=6)
def test_measure_shape_center_of_gravity(self):
# we compute the cog of a sphere centered at a point P
# then the cog must be P
x, y, z = 10., 3., -2.44 # random values for point P
radius = 20.
vector = gp_Vec(x, y, z)
sph = translate_shp(BRepPrimAPI_MakeSphere(radius).Shape(), vector)
cog, mass, mass_property = measure_shape_mass_center_of_gravity(sph)
self.assertAlmostEqual(cog.X(), x, places=6)
self.assertAlmostEqual(cog.Y(), y, places=6)
self.assertAlmostEqual(cog.Z(), z, places=6)
self.assertAlmostEqual(mass, 4 / 3 * math.pi * radius ** 3, places=6)
self.assertEqual(mass_property, "Volume")
def test_edge_to_bezier(self):
b = BRepPrimAPI_MakeTorus(30, 10).Shape()
t = TopologyExplorer(b)
for ed in t.edges():
is_bezier, bezier_curve, degree = edge_to_bezier(ed)
self.assertTrue(isinstance(is_bezier, bool))
if not is_bezier:
self.assertTrue(degree is None)
self.assertTrue(bezier_curve is None)
else:
self.assertTrue(isinstance(degree, int))
def suite():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(TestExtendShapeFactory))
return test_suite
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
madhurrajn/samashthi | lib/django/template/loaders/base.py | 128 | 3865 | import warnings
from django.template import Origin, Template, TemplateDoesNotExist
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.inspect import func_supports_parameter
class Loader(object):
# Only used to raise a deprecation warning. Remove in Django 1.10.
_accepts_engine_in_init = True
def __init__(self, engine):
self.engine = engine
def __call__(self, template_name, template_dirs=None):
# RemovedInDjango20Warning: Allow loaders to be called like functions.
return self.load_template(template_name, template_dirs)
def get_template(self, template_name, template_dirs=None, skip=None):
"""
Calls self.get_template_sources() and returns a Template object for
the first template matching template_name. If skip is provided,
template origins in skip are ignored. This is used to avoid recursion
during template extending.
"""
tried = []
args = [template_name]
# RemovedInDjango20Warning: Add template_dirs for compatibility with
# old loaders
if func_supports_parameter(self.get_template_sources, 'template_dirs'):
args.append(template_dirs)
for origin in self.get_template_sources(*args):
if skip is not None and origin in skip:
tried.append((origin, 'Skipped'))
continue
try:
contents = self.get_contents(origin)
except TemplateDoesNotExist:
tried.append((origin, 'Source does not exist'))
continue
else:
return Template(
contents, origin, origin.template_name, self.engine,
)
raise TemplateDoesNotExist(template_name, tried=tried)
def load_template(self, template_name, template_dirs=None):
warnings.warn(
'The load_template() method is deprecated. Use get_template() '
'instead.', RemovedInDjango20Warning,
)
source, display_name = self.load_template_source(
template_name, template_dirs,
)
origin = Origin(
name=display_name,
template_name=template_name,
loader=self,
)
try:
template = Template(source, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the
# template we were asked to load. This allows for correct
# identification of the actual template that does not exist.
return source, display_name
else:
return template, None
def get_template_sources(self, template_name):
"""
An iterator that yields possible matching template paths for a
template name.
"""
raise NotImplementedError(
'subclasses of Loader must provide a get_template_sources() method'
)
def load_template_source(self, template_name, template_dirs=None):
"""
RemovedInDjango20Warning: Returns a tuple containing the source and
origin for the given template name.
"""
raise NotImplementedError(
'subclasses of Loader must provide a load_template_source() method'
)
def reset(self):
"""
Resets any state maintained by the loader instance (e.g. cached
templates or cached loader modules).
"""
pass
@property
def supports_recursion(self):
"""
RemovedInDjango20Warning: This is an internal property used by the
ExtendsNode during the deprecation of non-recursive loaders.
"""
return hasattr(self, 'get_contents')
| bsd-3-clause |
abhishekgahlot/kivy | kivy/tests/test_clock.py | 17 | 1868 | '''
Clock tests
===========
'''
import unittest
counter = 0
def callback(dt):
global counter
counter += 1
class ClockTestCase(unittest.TestCase):
def setUp(self):
from kivy.clock import Clock
global counter
counter = 0
Clock._events = {}
def test_schedule_once(self):
from kivy.clock import Clock
Clock.schedule_once(callback)
Clock.tick()
self.assertEqual(counter, 1)
def test_schedule_once_twice(self):
from kivy.clock import Clock
Clock.schedule_once(callback)
Clock.schedule_once(callback)
Clock.tick()
self.assertEqual(counter, 2)
def test_schedule_once_draw_after(self):
from kivy.clock import Clock
Clock.schedule_once(callback, 0)
Clock.tick_draw()
self.assertEqual(counter, 0)
Clock.tick()
self.assertEqual(counter, 1)
def test_schedule_once_draw_before(self):
from kivy.clock import Clock
Clock.schedule_once(callback, -1)
Clock.tick_draw()
self.assertEqual(counter, 1)
Clock.tick()
self.assertEqual(counter, 1)
def test_unschedule(self):
from kivy.clock import Clock
Clock.schedule_once(callback)
Clock.unschedule(callback)
Clock.tick()
self.assertEqual(counter, 0)
def test_unschedule_after_tick(self):
from kivy.clock import Clock
Clock.schedule_once(callback, 5.)
Clock.tick()
Clock.unschedule(callback)
Clock.tick()
self.assertEqual(counter, 0)
def test_unschedule_draw(self):
from kivy.clock import Clock
Clock.schedule_once(callback, 0)
Clock.tick_draw()
self.assertEqual(counter, 0)
Clock.unschedule(callback)
Clock.tick()
self.assertEqual(counter, 0)
| mit |
pgleeson/TestArea | models/SBMLDemo/cellMechanisms/Marhl_Calcium_Oscillations/SBML2NEURON.py | 6 | 19212 | import os.path
#!/usr/bin/env python
#
#
# A file which can be used to generate NEURON mod files (and hoc files to test them)
# from **SIMPLE** SBML files. The only SBML elements which are currently supported are:
#
# listOfCompartments
# listOfSpecies
# listOfParameters
# listOfReactions
#
#
# For usage options type 'python SBML2NEURON.py'
#
# Author: Padraig Gleeson
#
# This file has been developed as part of the neuroConstruct & NeuroML projects
#
# This work has been funded by the Wellcome Trust and the Medical Research Council
#
#
#
import sys
import os
import libsbml
import subprocess
voltageSBOTerm = 'SBO:0000259' # To indicate a parameter refers to voltage/membrane potential
currentSBOTerm = 'SBO:0000999' # *DUMMY* SBO term to indicate a parameter refers to transmembrane current
specialSBOTerms = [voltageSBOTerm, currentSBOTerm]
functionDefs = {}
functionDefs["gt"] = "FUNCTION gt(A, B) {\n if (A > B) {gt = 1}\n else {gt = 0}\n}\n\n"
functionDefs["geq"] = "FUNCTION geq(A, B) {\n if (A >= B) {geq = 1}\n else {geq = 0}\n}\n\n"
functionDefs["lt"] = "FUNCTION lt(A, B) {\n if (A < B) {lt = 1}\n else {lt = 0}\n}\n\n"
functionDefs["leq"] = "FUNCTION leq(A, B) {\n if (A <= B) {leq = 1}\n else {leq = 0}\n}\n\n"
functionDefs["power"] = "FUNCTION power(A, B) {\n power\n}\n\n"
#TODO: more functions to add from here: http://sbml.org/Software/libSBML/docs/python-api/libsbml-python-math.html
class Species:
def __init__(self, realName, index):
self.realName = realName
self.index = index
self.increase = "" # Hoe the state variable will change
def getRealName(self):
return self.realName
def getLocalName(self):
localName = self.realName
if self.realName[0]=='_':
localName = "U%s"%self.realName
return localName
def getShortName(self):
return "SP_%i"%self.index
def setStateIncrease(self, inc):
self.increase = inc
def getStateIncrease(self):
return self.increase
def __str__(self):
return self.getRealName()
class Parameter:
def __init__(self, realName, value, index):
self.realName = realName
self.localName = realName
self.value = value
self.scope = 'GLOBAL'
self.sboTerm = ''
self.index = index
self.hasRateRule = False
def isGlobalScope(self):
return self.scope == 'GLOBAL'
def getRealName(self):
return self.realName
def getLocalName(self):
return self.localName
def setLocalName(self, localName):
self.localName = localName
def getSBOTerm(self):
return self.sboTerm
def setSBOTerm(self, sboTerm):
self.sboTerm = sboTerm
def getScope(self):
return self.scope
def setScope(self, scope):
self.scope = scope
def getShortName(self):
return "p%s"%hex(self.index)[2:]
def getValue(self):
return self.value
def getHasRateRule(self):
return self.hasRateRule
def setHasRateRule(self, rr):
self.hasRateRule = rr
def __str__(self):
return self.getRealName()
def main (args):
testMode = False
if "-test" in args:
testMode = True
args.remove("-test")
if len(args) != 2 and len(args) != 4 and len(args) != 6:
print "Usage examples: \n python SBML2NEURON.py ex.xml \n (to create a mod file ex.mod from the SBML file)"
print "\n python SBML2NEURON.py ex.xml 100 0.01\n (to create a mod file ex.mod from the SBML file, plus a test hoc file to run the simulation for 100ms, timestep 0.01ms)"
print "\n python SBML2NEURON.py ex.xml 100 0.01 -1 12\n (to files as above, with y axis from -1 to 12)"
sys.exit(1)
infile = args[1]
modfile = infile[0:infile.rfind('.')]+".mod"
generateModFromSBML(infile, modfile)
if len(args) == 4 or len(args) == 6:
rv = compileMod(modfile)
if rv != 0:
raise Exception('Problem compiling the mod file: %s'%modfile)
return
hocfile = infile[0:infile.rfind('.')]+"_test.hoc"
dur = float(args[2])
dt = float(args[3])
ymin = 0
ymax = 10
if len(args) == 6:
ymin = float(args[4])
ymax = float(args[5])
generateTestHocFromSBML(infile, hocfile, dur, dt, ymin, ymax, testMode)
def compileMod(infile):
modToCompile = os.path.realpath(infile)
dirToCompile = os.path.split(modToCompile)[0]
    cmdToCompileMod = 'nrnivmodl ' + dirToCompile # Assumes command is on PATH for Linux/Mac
if sys.platform.count('win') >0:
nrnDir = 'C:/nrn62' # NOTE forward slash is preferred by Python
cmdToCompileMod = "\""+nrnDir+"/bin/rxvt.exe\" -e \""+nrnDir+"/bin/sh\" \""+nrnDir+"/lib/mknrndll.sh\" \""+ nrnDir+"\" "
print "Compiling the mod files using: "+ cmdToCompileMod +" in dir: "+ dirToCompile
process = subprocess.Popen(cmdToCompileMod, cwd=dirToCompile)
rv = process.wait()
print "Compilation has finished with return val: %s"%rv
return rv
def generateTestHocFromSBML(sbmlFile, hocfile, dur, dt, ymin, ymax, testMode):
print "Going to read SBML from file: %s and create hoc file: %s" % (sbmlFile, hocfile)
reader = libsbml.SBMLReader()
sbmldoc = reader.readSBML(sbmlFile)
hoc = file(hocfile, mode='w')
hoc.write("load_file(\"nrngui.hoc\")\n\n")
hoc.write("print \"Testing mapping of SBML file %s on NEURON\"\n\n"%sbmlFile)
hoc.write("create soma\n")
hoc.write("access soma\n")
hoc.write("L = 1\n")
hoc.write("diam = 1\n\n")
hoc.write("insert pas \n\n")
modelId = getModelId(sbmldoc.getModel())
hoc.write("insert %s\n\n"%modelId)
hoc.write("psection()\n\n")
hoc.write("tstop = %s\n"%dur)
hoc.write("dt = %s\n"%dt)
hoc.write("steps_per_ms = %f\n\n"%(1/dt))
hoc.write("objref SampleGraph\n")
hoc.write("SampleGraph = new Graph(0)\n\n")
hoc.write("minVal = %s\n"%ymin)
hoc.write("maxVal = %s\n\n"%ymax)
hoc.write("{SampleGraph.size(0,tstop,minVal,maxVal)}\n")
hoc.write("{SampleGraph.view(0, minVal, tstop, (maxVal-minVal), 100, 500, 500, 300)}\n")
hoc.write("{\n")
colIndex = 1
for species in sbmldoc.getModel().getListOfSpecies():
print "Looking at: "+str(species.getId())
speciesName = species.getId()
if speciesName[0] == '_':
speciesName = 'U'+speciesName
hoc.write(" SampleGraph.addexpr(\"soma.%s_%s(0.5)\", %i, 1)\n" % (speciesName, modelId, colIndex))
colIndex+=1
for param in sbmldoc.getModel().getListOfParameters():
if not param.getConstant():
print "Looking at: "+str(param.getId())
paramName = param.getId()
hoc.write(" SampleGraph.addexpr(\"soma.%s_%s(0.5)\", %i, 1)\n" % (paramName, modelId, colIndex))
colIndex+=1
hoc.write(" graphList[0].append(SampleGraph)\n")
hoc.write("}\n\n")
hoc.write("print \"Starting simulation!\"\n")
hoc.write("{run()}\n")
hoc.write("print \"Finished simulation!\"\n")
print "Hoc file written to %s\n" % (hocfile)
if testMode:
hoc.write("\nobjref testResultFile\n")
hoc.write("{testResultFile = new File()}\n")
hoc.write("{testResultFile.wopen(\"%s.finished\")}\n" % (os.path.basename(hocfile) ))
#{testResultFile.printf("numPassed=%g\n", numTestsPassed)}
hoc.write("{testResultFile.close()}\n")
def getModelId(model):
modelId = model.getId()
if len(modelId)>=28:
modelId = modelId[0:28]
return modelId
# This assumes the formulas are generated with spaces, brackets or commas around the values
def replaceInFormula(formula, oldTerm, newTerm):
if formula.startswith(oldTerm):
formula = newTerm + formula[len(oldTerm):]
if formula.endswith(oldTerm):
formula = formula[:-1*len(oldTerm)]+newTerm
formula = formula.replace(" "+oldTerm+" ", " "+newTerm+" ")
formula = formula.replace(" "+oldTerm+")", " "+newTerm+")")
formula = formula.replace(","+oldTerm+")", ","+newTerm+")")
formula = formula.replace("("+oldTerm+" ", "("+newTerm+" ")
formula = formula.replace("("+oldTerm+",", "("+newTerm+",")
formula = formula.replace(" "+oldTerm+",", " "+newTerm+",")
return formula
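# Illustrative behaviour of replaceInFormula(), assuming formula strings as
# produced by libsbml.formulaToString() (terms delimited by spaces, brackets
# or commas, per the comment above):
#   replaceInFormula("k1 * (S1 + S2)", "S1", "SP_0")  ->  "k1 * (SP_0 + S2)"
#   replaceInFormula("pow(S1, 2)", "S1", "SP_0")      ->  "pow(SP_0, 2)"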
def generateModFromSBML(sbmlFile, modFile):
print "Going to read SBML from file: %s and create mod file: %s" % (sbmlFile, modFile)
reader = libsbml.SBMLReader()
sbmldoc = reader.readSBML(sbmlFile)
mod = file(modFile, mode='w')
modelId = getModelId(sbmldoc.getModel())
mod.write("TITLE SBML model: %s generated from file: %s\n\n" % (modelId, sbmlFile) )
mod.write("UNITS {\n")
mod.write(" (mA) = (milliamp)\n")
mod.write(" (mV) = (millivolt)\n")
mod.write("}\n\n")
derivs = ""
initial = ""
substituteSpeciesNames = False
speciesInfo = [] # List of Species objects
parameterInfo = [] # List of Parameter objects
for species in sbmldoc.getModel().getListOfSpecies():
s = Species(species.getId(), len(speciesInfo))
speciesInfo.append(s)
initVal = species.getInitialAmount()
if initVal == 0 and species.getInitialConcentration() >0:
initVal = species.getInitialConcentration()
initial = "%s %s = %s\n" % (initial, s.getLocalName(), initVal)
if len(speciesInfo)>=5:
print "There are %i species. Using shortened names in DERIVATIVE statement"%len(speciesInfo)
substituteSpeciesNames = True
print "States/species found: "+ str(speciesInfo)
for compartment in sbmldoc.getModel().getListOfCompartments():
p = Parameter(compartment.getId(), compartment.getSize(), len(parameterInfo))
parameterInfo.append(p)
for parameter in sbmldoc.getModel().getListOfParameters():
p = Parameter(parameter.getId(), parameter.getValue(), len(parameterInfo))
p.setSBOTerm(parameter.getSBOTermID())
if parameter.getSBOTermID() in specialSBOTerms:
print "SBOTerm of %s (%s) is special..." % (parameter.getId(), parameter.getSBOTermID() )
if parameter.getSBOTermID() == voltageSBOTerm:
p.setLocalName('v')
parameterInfo.append(p)
for rule in sbmldoc.getModel().getListOfRules():
if rule.getType() == libsbml.RULE_TYPE_RATE:
for p in parameterInfo:
if p.getRealName() == rule.getVariable():
p.setHasRateRule(True)
'''
# Reordering by longest name first so that parameter par1 doesn't get substituted for par10, etc.
sortedParamNames = []
for param in parameterInfo:
sortedParamNames.append(param.getRealName())
sortedParamNames.sort(key=len, reverse=True)
print sortedParamNames
sortedParams = []
for paramName in sortedParamNames:
for param in parameterInfo:
if param.getRealName()==paramName:
sortedParams.append(param)
parameterInfo = sortedParams
for param in parameterInfo: print param
'''
infoString = ""
extraFunctions = ""
for rule in sbmldoc.getModel().getListOfRules():
print "Looking at rule: %s of type: %s"%(rule, rule.getType())
if rule.getType() == libsbml.RULE_TYPE_RATE:
print "Rate rule: d(%s)/dt = %s"%(rule.getVariable(), rule.getFormula())
derivs = "%s %s' = %s\n"%(derivs, rule.getVariable(), rule.getFormula())
for reaction in sbmldoc.getModel().getListOfReactions():
rid = reaction.getId()
formula = reaction.getKineticLaw().getFormula()
origFormula = str(formula)
print "Looking at reaction %s with formula: (%s)"%(rid, reaction.getKineticLaw().getFormula())
for parameter in reaction.getKineticLaw().getListOfParameters():
localParamName = "%s_%s" % (rid,parameter.getId())
p = Parameter(parameter.getId(), parameter.getValue(), len(parameterInfo))
p.setLocalName(localParamName)
p.setScope(rid)
parameterInfo.append(p)
for param in parameterInfo:
if substituteSpeciesNames:
if param.getRealName() in formula:
print "Substituting %s for %s in: %s"%(param.getRealName(), param.getShortName(), formula)
formula = replaceInFormula(formula, param.getRealName(), param.getShortName())
else:
if param.isGlobalScope() or param.getScope()==rid:
formula = replaceInFormula(formula, param.getRealName(), param.getLocalName())
for species in speciesInfo:
formula = replaceInFormula(formula, species.getRealName(), species.getLocalName())
'''if substituteSpeciesNames:
for species in speciesInfo:
formula = replaceInFormula(formula, species.getRealName(), species.getShortName())'''
if substituteSpeciesNames:
formula = formula.replace(" * ", "*")
formula = formula.replace(" + ", "+")
prodString = ""
reacString = ""
for product in reaction.getListOfProducts():
stoichiometryFactor = ""
if product.getStoichiometry() != 1:
stoichiometryFactor = "%f * "%product.getStoichiometry()
prodSpecies = None
for species in speciesInfo:
if species.getRealName() == product.getSpecies():
prodSpecies = species
if prodSpecies.getStateIncrease() != "":
prodSpecies.setStateIncrease(prodSpecies.getStateIncrease() + " +")
prodSpecies.setStateIncrease("%s (%s%s)" % (prodSpecies.getStateIncrease(), stoichiometryFactor, formula))
if len(prodString) > 0: prodString = prodString +", "
prodString = prodString+ product.getSpecies()
for reactant in reaction.getListOfReactants():
stoichiometryFactor = ""
if reactant.getStoichiometry() != 1:
stoichiometryFactor = "%f * "%reactant.getStoichiometry()
reactantSpecies = None
for species in speciesInfo:
if species.getRealName() == reactant.getSpecies():
reactantSpecies = species
reactantSpecies.setStateIncrease("%s - (%s%s)" % (reactantSpecies.getStateIncrease(), stoichiometryFactor, formula))
if len(reacString) > 0: reacString = reacString +", "
reacString = reacString+ reactant.getSpecies()
infoString = "%s : Reaction %s (%s) -> (%s) with formula : %s (ORIGINALLY: %s)\n" % (infoString, rid, reacString, prodString, formula, origFormula)
for species in speciesInfo:
if species.getStateIncrease() != "":
derivs = "%s %s\' = %s \n" % (derivs, species.getLocalName(), species.getStateIncrease())
assigned = ''
mod.write("NEURON {\n")
mod.write(" SUFFIX %s\n" % modelId)
for param in parameterInfo:
if param.getSBOTerm() != voltageSBOTerm and param.getSBOTerm() != currentSBOTerm and not param.getHasRateRule():
mod.write(" RANGE %s\n" % param.getLocalName())
if param.getSBOTerm() == currentSBOTerm:
mod.write(" NONSPECIFIC_CURRENT %s\n" % param.getLocalName())
#assigned = "%s\n %s (nanoamp)\n"%(assigned, param.getLocalName())
mod.write("}\n\n")
mod.write("PARAMETER {\n")
for param in parameterInfo:
if not param.getHasRateRule():
if param.getSBOTerm() == voltageSBOTerm:
mod.write(" v (mV)\n")
elif param.getSBOTerm() == currentSBOTerm:
assigned = "%s %s (nanoamp)\n"%(assigned, param.getLocalName())
else:
mod.write(" %s = %s\n"%(param.getLocalName(), param.getValue()))
mod.write("}\n\n")
if len(assigned) > 0:
mod.write("ASSIGNED {\n")
mod.write(assigned)
mod.write("}\n\n")
print "Num species: "+str(sbmldoc.getModel().getListOfSpecies().size())
mod.write("STATE {\n")
for species in speciesInfo:
mod.write(" %s\n"%species.getLocalName())
for param in parameterInfo:
if param.getHasRateRule():
mod.write(" %s\n"%param.getLocalName())
mod.write("}\n\n")
mod.write("INITIAL {\n")
mod.write(initial)
#for param in parameterInfo:
# mod.write(" %s = %s\n"%(param.getLocalName(), param.getValue()))
for param in parameterInfo:
if param.getHasRateRule():
mod.write(" %s = %s\n"%(param.getLocalName(), param.getValue()))
mod.write("}\n\n")
mod.write("BREAKPOINT {\n")
mod.write(" SOLVE states METHOD derivimplicit\n")
mod.write(" ? Need to check order in which assignments/event assignments should be updated!!!\n")
for rule in sbmldoc.getModel().getListOfRules():
if rule.getType() == libsbml.RULE_TYPE_SCALAR:
ruleString = "%s = %s"%(rule.getVariable(), rule.getFormula())
mod.write("\n ? Assignment rule here: %s\n"%ruleString)
mod.write(" %s\n"%ruleString)
for event in sbmldoc.getModel().getListOfEvents():
trigger = libsbml.formulaToString((event.getTrigger().getMath()))
trigger = replaceInFormula(trigger, "time", "t")
print "Adding info on event with trigger: %s"%(trigger)
mod.write(" if (%s) {\n" % trigger)
for ea in event.getListOfEventAssignments():
print "Event assi: %s = %s"%(ea.getVariable(), libsbml.formulaToString(ea.getMath()))
var = ea.getVariable()
if var == "time":
var = "t"
formula = libsbml.formulaToString(ea.getMath())
formula = replaceInFormula(formula, "time", "t")
formula = replaceInFormula(formula, "pow", "power")
for function in functionDefs.keys():
if function in formula or function in trigger:
replaceWith = functionDefs[function]
if not replaceWith in extraFunctions:
extraFunctions = extraFunctions+replaceWith
            mod.write(" %s = %s\n"%(var, formula))
mod.write(" }\n\n")
mod.write("}\n\n")
mod.write("DERIVATIVE states {\n")
if substituteSpeciesNames:
mod.write(" LOCAL dummy ")
'''for speciesInd in range(0, len(speciesInfo)):
if speciesInd>0: mod.write(",")
mod.write("%s"%speciesInfo[speciesInd].getShortName())'''
for param in parameterInfo:
mod.write(",%s"%param.getShortName())
mod.write("\n")
'''for species in speciesInfo:
mod.write(" %s = %s\n"%(species.getShortName(), species.getLocalName()))
mod.write("\n")'''
for param in parameterInfo:
mod.write(" %s = %s\n"%(param.getShortName(), param.getLocalName()))
mod.write("\n")
mod.write(infoString)
mod.write(derivs)
mod.write("}\n\n")
mod.write(extraFunctions)
print "Mod file written to %s" % (modFile)
'''
formula = "(a+b) * (c-d)"
ast = libsbml.parseFormula(formula)
print libsbml.formulaToString(ast)
def convertASTNode(astNode):
if astNode is not None:
print "Converting : %s"%astNode
print "To : %s"%libsbml.formulaToString(astNode)
print "Curr node: %s, %s, children: %i" %(astNode.getName(), astNode.getType(), astNode.getNumChildren())
if astNode.getNumChildren() == 2:
convertASTNode(astNode.getChild(0))
convertASTNode(astNode.getChild(1))
'''
if __name__ == '__main__':
main(sys.argv)
| gpl-2.0 |
xiaoneng/crispy-forms-pure | docs/conf.py | 1 | 8617 | # -*- coding: utf-8 -*-
#
# crispy-form-pure documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 15 20:21:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from crispy_forms_pure import __version__ as crispy_forms_pure_version
# Push a dummy settings file required by Django that is imported in
# "crispy_forms_pure.layout"
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'dummy_settings'
from django.conf import settings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'crispy-form-pure'
copyright = u'2014, David THENON'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = crispy_forms_pure_version
# The full version, including alpha/beta/rc tags.
release = crispy_forms_pure_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'crispy-form-puredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'crispy-form-pure.tex', u'crispy-form-pure Documentation',
u'David THENON', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'crispy-form-pure', u'crispy-form-pure Documentation',
[u'David THENON'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'crispy-form-pure', u'crispy-form-pure Documentation',
u'David THENON', 'crispy-form-pure', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
awemany/BitcoinUnlimited | test/functional/wallet-hd.py | 10 | 3623 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
start_nodes,
start_node,
assert_equal,
connect_nodes_bi,
assert_start_raises_init_error
)
import os
import shutil
class WalletHDTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
self.node_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']]
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.node_args)
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
def run_test (self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
self.stop_node(1)
assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep masterkeyid
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
assert_equal(len(masterkeyid), 40)
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i+1)+"'")
assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
self.log.info("Restore backup ...")
self.stop_node(1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_+1)+"'")
assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
self.stop_node(1)
self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1] + ['-rescan'])
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
if __name__ == '__main__':
WalletHDTest().main ()
| mit |
Jumpscale/jumpscale6_core | lib/JumpScale/lib/ovsnetconfig/VXNet/systemlist.py | 1 | 9345 | __author__ = 'delandtj'
from utils import *
import fcntl
import time
import re
from netaddr import *
from utils import *
def acquire_lock(path):
"""
little tool to do EAGAIN until lockfile released
:param path:
:return: path
"""
lock_file = open(path, 'w')
while True:
send_to_syslog("attempting to acquire lock %s" % path)
try:
fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
send_to_syslog("acquired lock %s" % path)
return lock_file
except IOError, e:
send_to_syslog("failed to acquire lock %s because '%s' - waiting 1 second" % (path, e))
time.sleep(1)
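# Minimal usage sketch for acquire_lock(); the lock path below is illustrative only:
#   lock = acquire_lock('/var/run/vxnet.lock')
#   try:
#       pass  # critical section touching the network configuration
#   finally:
#       lock.close()  # closing the file releases the fcntl lock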
def wait_for_if(interface):
pass
def pprint_dict(a):
from pprint import pprint
pprint(dict(a.items()))
def get_nic_params():
nictypes = {}
bridges = get_all_bridges()
namespaces = get_all_namespaces()
def populatenictypes(lines, namespace=None):
for l in lines:
if not 'state' in l: continue
entry = l.strip().split()
intf = entry[1].rstrip(':')
if intf == 'lo' : continue
nictypes[intf] = {}
if 'vxlan' in entry :
want = ('state','id' ,'mtu','id','group','dev','port')
params = parse_ipl_line(entry,want)
params['type'] = 'vxlan'
elif 'veth' in entry:
want = ('state','id','mtu')
params = parse_ipl_line(entry,want)
params['peer'] = find_veth_peer(intf,ns=namespace)
params['type'] = 'veth'
elif 'tun' in entry:
want = ('state','id','mtu')
params = parse_ipl_line(entry,want)
params['type'] = 'tun'
elif intf in bridges:
want = ('state','id','mtu')
params = parse_ipl_line(entry,want)
params['type'] = 'bridge'
else:
want = ('state','id','mtu')
params = parse_ipl_line(entry,want)
nictypes[intf]['params'] = params
if namespace == None:
nictypes[intf]['detail'] = get_nic_detail(intf)
nictypes[intf]['namespace'] = None
else:
nictypes[intf]['namespace'] = namespace
return nictypes
# local
cmd = '%s -o -d link show ' % ip
intflist = dobigexec(cmd.split())
lines = intflist[0].split('\n')
nictypes = populatenictypes(lines)
# all namespaces
for ns in namespaces:
cmd = '%s netns exec %s %s -o -d link show' % (ip,ns,ip)
(r,s,e) = doexec(cmd.split())
lines = s.readlines()
nictypes = dict(populatenictypes(lines,namespace=ns).items() + nictypes.items())
return nictypes
def parse_ipl_line(line, params):
"""
Get NIC settings in line
:param line: list of ip -o -d link show
:param params: tuple of keywords
"""
nicsettings = {}
for p in params:
if p == 'state':
nicsettings[p] = line[2].lstrip('<').rstrip('>').split(',')
elif p == 'id':
nicsettings[p] = line[0].rstrip(':')
else:
nicsettings[p] = line[line.index(p)+1] #watch out for index out of range
return nicsettings
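# Illustrative call with one simplified entry from 'ip -o -d link show'
# (real output carries more fields than shown here):
#   entry = "7: vxlan10: <BROADCAST,MULTICAST,UP> mtu 1450 vxlan id 10 group 239.0.0.10 dev eth0".split()
#   parse_ipl_line(entry, ('state', 'id', 'mtu', 'group', 'dev'))
#   -> {'state': ['BROADCAST', 'MULTICAST', 'UP'], 'id': '7', 'mtu': '1450',
#      'group': '239.0.0.10', 'dev': 'eth0'}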
def get_nic_detail(interface):
prefix= '/sys/class/net'
# every interface has a mac
carrier = None; speed = None; peer = None
with open(os.path.join(prefix,interface,"address")) as f:
addr = f.readline().strip()
    # if linked to somewhere in pci it probably is physical
if 'pci' in os.readlink(os.path.join(prefix,interface)):
typ = 'PHYS'
elif 'virtual' in os.readlink(os.path.join(prefix,interface)):
typ = 'VIRT'
else:
typ = 'UNKNOWN'
if typ == 'PHYS':
# verify if link has carrier
(r,s,e) = doexec(['ethtool',interface])
# Link Detected and speed
out = s.readlines()
for i in out:
string = i.strip().split(':')
if string[0] == 'Link detected' :
carrier = True if string[1].strip() == 'yes' else False
if carrier == True:
for i in out:
string=i.strip().split(':')
if string[0] == 'Speed' : speed = string[1].strip()
return [typ,addr,carrier,speed]
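# Illustrative return values (MAC addresses and speed are made-up examples):
#   get_nic_detail('eth0')  -> ['PHYS', '00:11:22:33:44:55', True, '1000Mb/s']
#   get_nic_detail('vnet3') -> ['VIRT', 'fe:54:00:12:34:56', None, None]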
def find_veth_peer(interface,ns=None):
"""
Left or right part of veth
@param interface:
@return: name
"""
cmd = '%s -S %s'% (ethtool, interface)
if ns != None:
cmd = '%s netns exec %s ' % (ip,ns) + cmd
r,s,e = doexec(cmd.split())
a=s.readlines()
peer = [int(x.split(':')[1].rstrip()) for x in a if 'ifindex' in x]
if len(peer) > 0 :
return peer[0]
else: return None
def add_ips_to(physlayout):
fullset = {}
iplist = get_ip_addrs()
for key in iplist: # you can have interfaces without ip
fullset[key] = physlayout[key]
fullset[key]['ipaddrs'] = iplist[key]
# merge rest
for key in physlayout:
if key not in fullset:
fullset[key] = physlayout[key]
if physlayout[key]['namespace'] != None:
fullset[key]['ipaddrs'] = get_ip_addrs(namespace=physlayout[key]['namespace'])
return fullset
def get_ip_addrs(onlypermanent=False, namespace=None):
if namespace == None:
cmd = '%s -o addr show' % ip
else:
cmd = '%s netns exec %s %s -o addr show' % (ip, namespace, ip)
(r,s,e) = doexec(cmd.split())
lines = s.readlines()
iplist = {}
for l in lines:
i = l.strip().split()
if not 'forever' in l and onlypermanent: continue
iface = i[1].rstrip(':'); ipstr = i[3]
if iface == 'lo' : continue
ipobj = IPNetwork(ipstr)
if iface not in iplist:
iplist[iface] = {}
iplist[iface]['ipaddrs'] = []
iplist[iface]['ipaddrs'].append(ipobj)
else:
iplist[iface]['ipaddrs'].append(ipobj)
return iplist
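# Shape of the returned structure (addresses are illustrative):
#   {'eth0': {'ipaddrs': [IPNetwork('192.168.1.10/24')]},
#    'vxlan10': {'ipaddrs': [IPNetwork('10.0.0.1/16')]}}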
def isup(interface):
    cmd = '%s -o link show dev %s' % (ip, interface)
r,s,e = doexec(cmd.split())
line = s.readlines() ; l = line[0].strip().split()
state = l[2].lstrip('<').rstrip('>').split(',')
if 'UP' in state:
return True
return False
def getnetworkstructure(onlypermanent=True,without_ip=False):
"""
@param onlypermanent:
@param without_ip:
@return:
"""
(r,s,e) = doexec('ip -o addr show'.split())
interfaces = s.readlines()
s = {}
for l in interfaces:
i = l.split()
if not 'forever' in l and onlypermanent and not without_ip: continue
id = re.match('\d+',i[0]).group()
intf = i[1]; inet = i[2]; ipstr = i[3]
if not s.has_key(intf): s[intf] = {}
s[intf]['id'] = id
if not s[intf].has_key(inet): s[intf][inet] = []
s[intf][inet].append(IPNetwork(ipstr))
nictype,mac,carrier,speed = get_nic_detail(intf)
s[intf]['nictype'] = nictype
s[intf]['mac'] = mac
if carrier:
s[intf]['link'] = 'detected'
s[intf]['speed'] = speed
return s
def cleanup_flows(bridge_name,interface):
"""
flows of which ports do not exist any more get removed (generic cleanup)
@param bridge_name:
"""
    flowports = list_ports_in_of(bridge_name)
activeports = [int(get_vswitch_port(x)) for x in list_ports(interface)]
ap = set(activeports)
todelete = [x for x in flowports if x not in ap]
for i in todelete:
clear_vswitch_rules(bridge_name, i)
def list_ports_in_of(interface):
"""
list ports in openFlow tables
@return:
"""
ipm = re.compile('(?<=in_port\=)\d{1,5}')
cmd = ofctl + " dump-flows " + interface
(r, s, e) = doexec(cmd.split())
li = [line.strip() for line in s.readlines() if 'in_port' in line]
ports = [int(ipm.search(x).group(0)) for x in li]
return ports
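# Example, assuming 'ovs-ofctl dump-flows <bridge>' prints flow entries such as:
#   cookie=0x0, duration=12.3s, table=0, n_packets=4, priority=100,in_port=5 actions=output:7
# the lookbehind regex above extracts 5, so the function returns the list of
# in_port numbers that still have flow rules installed.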
def get_attached_mac_port(virt_vif):
"""
@param virt_vif:
@return: port and mac
"""
if virt_vif:
cmd = vsctl + ' -f table -d bare --no-heading -- --columns=ofport,external_ids list Interface ' + virt_vif
(r, s, e) = doexec(cmd.split())
o = s.readline().split()
port = o.pop(0)
mac = o.pop(0).split('=')[1]
return port, mac
else:
send_to_syslog("No matching virt port found in get_attached_mac_port(virt_vif)")
sys.exit(0)
def get_bridge_name(vif_name):
"""
@param vif_name:
@return: bridge
"""
(rc, stdout, stderr) = doexec([vsctl, "port-to-br", vif_name])
return stdout.readline().strip()
def list_ports(bridge_name):
"""
@param bridge_name:
@return: all ports on bridge
"""
(rc, stdout, stderr) = doexec([vsctl, "list-ports", bridge_name])
ports = [line.strip() for line in stdout.readlines()]
return ports
def get_vswitch_port(vif_name):
"""
@param vif_name:
@return: all ports
"""
(rc, stdout, stderr) = doexec([vsctl, "get", "interface", vif_name, "ofport"])
return stdout.readline().strip()
def clear_vswitch_rules(bridge_name, port):
"""
@param bridge_name:
@param port:
"""
doexec([ofctl, "del-flows", bridge_name, "in_port=%s" % port])
if __name__ =='__main__':
a = get_nic_params()
pprint_dict(a)
| bsd-2-clause |
peastman/msmbuilder | msmbuilder/msm/validation/bootstrapmsm.py | 7 | 8157 | # Author: Mohammad M. Sultan <[email protected]>
# Contributors: Brooke Husic <[email protected]>
# Copyright (c) 2016, Stanford University
# All rights reserved.
from __future__ import absolute_import, division
from multiprocessing import Pool, cpu_count
from msmbuilder.utils import list_of_1d
from sklearn.utils import resample
from ..core import _MappingTransformMixin
from ..msm import MarkovStateModel
import numpy as np
import warnings
class BootStrapMarkovStateModel(_MappingTransformMixin):
"""Bootstrap MarkovState Model.
This model fits a series of first-order Markov models
to bootstrap samples obtained from a dataset of
integer-valued timeseries.The sequence of transition
matrices are obtained using random sampling with
replacement over the set of input trajectories.
The model also fits the mle over the original set.
Parameters
----------
n_samples : int
Number of bootstrap models to construct
n_procs : int
Number of processors to use.
Defaults to int(cpu_count/2)
msm_args: dict
Dictionary containing arguments to pass unto
the MSM models.
save_all_models: bool
Whether or not to save all the models.
Defaults to false.
Attributes
----------
mle_ : Markov State Model
MSM model fit unto original dataset. The state mapping
inferred here is used through the rest of the models.
all_populations_ : list of lists
Array of all populations obtained from each model
all_training_scores_ : list
List of scores obtained from each model
all_test_scores_ : list
List of scores obtained on the sequences omitted from
each model
resample_ind_ : list
        list of resample indices used to fit each bootstrap model.
        This can be used to regenerate any model without having
        to store it inside this object.
mapped_populations_ : array (n_samples, mle.n_states_)
Array containing population estimates from all the
models for the states retained in the mle model.
mapped_populations_mean_ : array shape = (mle.n_states_)
Mean population across the set of models for the states
contained in the mle model.
mapped_populations_std_ : array shape = (mle.n_states_)
Population std across the set of models for the states
contained in the mle model.
mapped_populations_sem_ : array shape = (mle.n_states_)
Population sem across the set of models for the states
contained in the mle model.(std/sqrt(self._succesfully_fit))
training_scores_mean_ : list
Mean population across the list of model scores
training_scores_std_ : list
Population std across the list of model scores
test_scores_mean_ : list
Mean population across the list of scores obtained on
the sequences omitted from each model
    test_scores_std_ : list
Population std across the list of scores obtained on
the sequences omitted from each model
Notes
-----
    The correct number of bootstrap samples is subject to
    debate, with several hundred samples (n_samples)
    being the recommended starting figure.
The fit function for this model optionally takes in a pool
of workers making it capable of parallelizing across
compute nodes via mpi or ipyparallel. This can lead to
a significant speed up for larger number of samples.
Examples
--------
    >>> bmsm = BootStrapMarkovStateModel(n_samples=800,
    ...                                  msm_args={'lag_time':1})
"""
def __init__(self, n_samples=10, n_procs=None, msm_args=None, save_all_models = False):
self.n_samples = n_samples
self.n_procs = n_procs
if msm_args is None:
msm_args = {}
self.msm_args = msm_args
self.mle_ = MarkovStateModel(**self.msm_args)
self.save_all_models = save_all_models
self._succesfully_fit = 0
self._ommitted_trajs_ = None
self.all_models_ = None
self.all_populations_ = None
self.mapped_populations_ = None
self.all_training_scores_ = None
self.all_test_scores_ = None
self.resample_ind_ = None
def fit(self, sequences, y=None, pool=None):
sequences = list_of_1d(sequences)
self.mle_.fit(sequences, y=y)
self._parallel_fit(sequences, pool)
return self
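    # Usage sketch: the pool argument only needs a map() method, so e.g. a
    # multiprocessing.Pool or an ipyparallel view can be passed in explicitly
    # (values below are illustrative only):
    #   bmsm = BootStrapMarkovStateModel(n_samples=100, msm_args={'lag_time': 1})
    #   bmsm.fit(sequences, pool=Pool(4))
    #   print(bmsm.mapped_populations_mean_)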
def _parallel_fit(self, sequences, pool=None):
if self.n_procs is None:
self.n_procs = int(cpu_count()/2)
if pool is None:
pool = Pool(self.n_procs)
self.all_populations_ = []
self.mapped_populations_ = np.zeros((self.n_samples, self.mle_.n_states_))
self.mapped_populations_[:,:] = np.nan
self.all_training_scores_ = []
self.all_test_scores_ = []
        #we cache the sequences of resampling indices so that any mdl can be
        #regenerated later on
self.resample_ind_ = [resample(range(len(sequences)))
for _ in range(self.n_samples)]
jbs =[([sequences[trj_ind] for trj_ind in sample_ind],
self.msm_args)
for sample_ind in self.resample_ind_]
traj_set = set(range(len(sequences)))
#get trajectory index that were omitted in each sampling
omitted_trajs = [traj_set.difference(set(sample_ind))
for sample_ind in self.resample_ind_]
self._ommitted_trajs_ = omitted_trajs
#get the test jobs
test_jbs = [[sequences[trj_ind] for trj_ind in omitted_index]
for omitted_index in omitted_trajs]
all_models = pool.map(_fit_one, jbs)
for mdl_indx, mdl in enumerate(all_models):
if mdl is not None:
self._succesfully_fit += 1
self.all_populations_.append(mdl.populations_)
self.mapped_populations_[mdl_indx,:] = \
_mapped_populations(self.mle_, mdl)
self.all_training_scores_.append(mdl.score_) # BEH
try:
self.all_test_scores_.append(mdl.score(test_jbs[mdl_indx]))
except ValueError:
self.all_test_scores_.append(np.nan)
if self.save_all_models:
self.all_models_ = all_models
@property
def mapped_populations_mean_(self):
return np.mean(self.mapped_populations_, axis=0)
@property
def mapped_populations_std_(self):
return np.std(self.mapped_populations_, axis=0)
@property
def mapped_populations_sem_(self):
return np.std(self.mapped_populations_, axis=0)/np.sqrt(self._succesfully_fit)
@property
def training_scores_mean_(self):
return np.nanmean(self.all_training_scores_, axis=0)
@property
def training_scores_std_(self):
return np.nanstd(self.all_training_scores_, axis=0)
@property
def test_scores_mean_(self):
return np.nanmean(self.all_test_scores_, axis=0)
@property
def test_scores_std_(self):
return np.nanstd(self.all_test_scores_, axis=0)
def _mapped_populations(mdl1, mdl2):
"""
Method to get the populations for states in mdl 1
from populations inferred in mdl 2. Resorts to 0
if population is not present.
"""
return_vect = np.zeros(mdl1.n_states_)
for i in range(mdl1.n_states_):
try:
#there has to be a better way to do this
mdl1_unmapped = mdl1.inverse_transform([i])[0][0]
mdl2_mapped = mdl2.mapping_[mdl1_unmapped]
return_vect[i] = mdl2.populations_[mdl2_mapped]
except:
pass
return return_vect
def _fit_one(jt):
sequences, msm_args = jt
mdl = MarkovStateModel(**msm_args)
#there is no guarantee that the mdl fits this sequence set so
#we return None in that instance.
try:
mdl.fit(sequences)
# solve the eigensystem
except ValueError:
mdl = None
warnings.warn("One of the MSMs fitting "
"failed")
return mdl
| lgpl-2.1 |
oshtaier/robottelo | robottelo/api/utils.py | 1 | 4523 | """Module containing convenience functions for working with the API."""
from nailgun import client
from robottelo.common import helpers
from robottelo import entities
from urlparse import urljoin
class RepositoryPackagesException(Exception):
"""Indicates that a repository's packages could not be fetched."""
class RepositoryErrataException(Exception):
"""Indicates that a repository's errata could not be fetched."""
def get_errata(repository_id):
"""Return all erratums belonging to repository ``repository_id``.
:param int repository_id: A repository ID.
:return: That repository's errata.
:raises robottelo.api.utils.RepositoryErrataException: If an error occurs
while fetching the requested repository's errata.
"""
path = urljoin(
helpers.get_server_url(),
'katello/api/v2/repositories/{0}/errata'.format(repository_id)
)
response = client.get(
path,
auth=helpers.get_server_credentials(),
verify=False,
).json()
if 'errors' in response.keys():
raise RepositoryErrataException(
'Error received after issuing GET to {0}. Error received: {1}'
''.format(path, response['errors'])
)
return response['results']
def get_packages(repository_id):
"""Return all packages belonging to repository ``repository_id``.
:param int repository_id: A repository ID.
:return: That repository's packages.
:raises robottelo.api.utils.RepositoryPackagesException: If an error occurs
while fetching the requested repository's packages.
"""
path = urljoin(
helpers.get_server_url(),
'katello/api/v2/repositories/{0}/packages'.format(repository_id)
)
response = client.get(
path,
auth=helpers.get_server_credentials(),
verify=False,
).json()
if 'errors' in response.keys():
raise RepositoryPackagesException(
'Error received after issuing GET to {0}. Error received: {1}'
''.format(path, response['errors'])
)
return response['results']
def status_code_error(path, desired, response):
"""Compose an error message using ``path``, ``desired`` and ``response``.
``desired`` and ``path`` are used as-is. The following must be present on
``response``:
* ``response.status_code``
* ``response.json()``
:param str path: The path to which a request was sent.
:param int desired: The desired return status code.
:param response: The ``Response`` object returned.
:return: An error message.
:rtype: str
"""
# Decode response into JSON format, if possible.
try:
json_response = response.json()
except ValueError:
json_response = None
# Generate error message.
if json_response is None:
err_msg = 'Could not decode response; not in JSON format.'
else:
if 'error' in json_response.keys():
err_msg = json_response['error']
elif 'errors' in json_response.keys():
err_msg = json_response['errors']
else:
err_msg = 'Response in JSON format, but contains no error message.'
return u'Desired HTTP {0} but received HTTP {1} after sending request ' \
'to {2}. {3}'.format(desired, response.status_code, path, err_msg)
def enable_rhrepo_and_fetchid(basearch, org_id, product, repo,
reposet, releasever):
"""Enable a RedHat Repository and fetches it's Id.
:param str org_id: The organization Id.
:param str product: The product name in which repository exists.
:param str reposet: The reposet name in which repository exists.
:param str repo: The repository name who's Id is to be fetched.
:param str basearch: The architecture of the repository.
:param str releasever: The releasever of the repository.
:return: Returns the repository Id.
:rtype: str
"""
prd_id = entities.Product().fetch_rhproduct_id(name=product, org_id=org_id)
reposet_id = entities.Product(id=prd_id).fetch_reposet_id(name=reposet)
task = entities.Product(id=prd_id).enable_rhrepo(
base_arch=basearch,
release_ver=releasever,
reposet_id=reposet_id,
)
if task['result'] != "success":
raise entities.APIResponseError(
'Enabling the RedHat Repository {0} failed. Error: {1}'
.format(repo, task['humanized']['errors']))
return entities.Repository().fetch_repoid(name=repo, org_id=org_id)
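# Illustrative call (all names and values below are example placeholders):
#   repo_id = enable_rhrepo_and_fetchid(
#       basearch='x86_64',
#       org_id='1',
#       product='Red Hat Enterprise Linux Server',
#       repo='Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server',
#       reposet='Red Hat Enterprise Linux 7 Server (RPMs)',
#       releasever='7Server',
#   )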
| gpl-3.0 |
lento/cortex | test/IECore/MatrixMultiplyOp.py | 11 | 4939 | ##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
from IECore import *
class TestMultiplyMatrixOp( unittest.TestCase ) :
def testMultiplication( self ) :
vectorTypes = [
V3fVectorData( [ V3f(1), V3f(2), V3f(3) ], GeometricData.Interpretation.Vector ),
V3dVectorData( [ V3d(1), V3d(2), V3d(3) ], GeometricData.Interpretation.Vector ),
]
matrixTypes = [
M33fData( M33f() * 3 ),
M33dData( M33d() * 3 ),
M44fData( M44f().createScaled( V3f(3) ) ),
M44dData( M44d().createScaled( V3d(3) ) ),
TransformationMatrixfData( TransformationMatrixf( V3f( 3 ), Eulerf(), V3f( 0 ) ) ),
TransformationMatrixdData( TransformationMatrixd( V3d( 3 ), Eulerd(), V3d( 0 ) ) ),
]
for vector in vectorTypes:
targetVector = vector.copy()
for i in xrange( len( targetVector) ):
targetVector[ i ] = targetVector[ i ] * 3
for matrix in matrixTypes:
res = MatrixMultiplyOp()( object = vector.copy(), matrix = matrix )
if res == targetVector:
continue
raise Exception, "Error testing vector " + str(type(vector)) + " against matrix " + str(type(matrix)) + ". Resulted " + str( res )
def testInterpretations( self ) :
v = V3fVectorData( [ V3f( 1 ), V3f( 2 ), V3f( 3 ) ], GeometricData.Interpretation.Point )
o = MatrixMultiplyOp()
# as points
vt = o( object = v.copy(), matrix = M44fData( M44f.createTranslated( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] + V3f( 1, 2, 3 ) )
# as vectors
v2 = v.copy()
v2.setInterpretation( GeometricData.Interpretation.Vector )
vt = o( object = v2, matrix = M44fData( M44f.createTranslated( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] )
vt = o( object = v2, matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] * V3f( 1, 2, 3 ) )
# as normals
v3 = v.copy()
v3.setInterpretation( GeometricData.Interpretation.Normal )
vt = o( object = v3, matrix = M44fData( M44f.createTranslated( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] )
vt = o( object = v3, matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertNotEqual( vt[i], v[i] * V3f( 1, 2, 3 ) )
# nothing happens for numeric
v4 = v.copy()
v4.setInterpretation( GeometricData.Interpretation.Numeric )
vt = o( object = v4, matrix = M44fData( M44f.createTranslated( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] )
vt = o( object = v4, matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] )
# nothing happens for color
v5 = v.copy()
v5.setInterpretation( GeometricData.Interpretation.Color )
vt = o( object = v5, matrix = M44fData( M44f.createTranslated( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] )
vt = o( object = v5, matrix = M44fData( M44f.createScaled( V3f( 1, 2, 3 ) ) ) )
for i in range( v.size() ) :
self.assertEqual( vt[i], v[i] )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
Sumith1896/sympy | sympy/series/kauers.py | 5 | 1808 | from __future__ import print_function, division
def finite_diff(expression, variable, increment=1):
"""
Takes as input a polynomial expression and the variable used to construct
it and returns the difference between the function's value when the input is
incremented by 1 and the original function value. If you want an increment
other than one, supply it as the third argument.
Examples
=========
>>> from sympy.abc import x, y, z, k, n
>>> from sympy.series.kauers import finite_diff
>>> from sympy import Sum
>>> finite_diff(x**2, x)
2*x + 1
>>> finite_diff(y**3 + 2*y**2 + 3*y + 4, y)
3*y**2 + 7*y + 6
>>> finite_diff(x**2 + 3*x + 8, x, 2)
4*x + 10
>>> finite_diff(z**3 + 8*z, z, 3)
9*z**2 + 27*z + 51
"""
expression = expression.expand()
expression2 = expression.subs(variable, variable + increment)
expression2 = expression2.expand()
return expression2 - expression
def finite_diff_kauers(sum):
"""
Takes as input a Sum instance and returns the difference between the sum
with the upper index incremented by 1 and the original sum. For example,
if S(n) is a sum, then finite_diff_kauers will return S(n + 1) - S(n).
Examples
========
>>> from sympy.series.kauers import finite_diff_kauers
>>> from sympy import Sum
>>> from sympy.abc import x, y, m, n, k
>>> finite_diff_kauers(Sum(k, (k, 1, n)))
n + 1
>>> finite_diff_kauers(Sum(1/k, (k, 1, n)))
1/(n + 1)
>>> finite_diff_kauers(Sum((x*y**2), (x, 1, n), (y, 1, m)))
(m + 1)**2*(n + 1)
>>> finite_diff_kauers(Sum((x*y), (x, 1, m), (y, 1, n)))
(m + 1)*(n + 1)
"""
function = sum.function
for l in sum.limits:
function = function.subs(l[0], l[- 1] + 1)
return function
| bsd-3-clause |
jalexvig/tensorflow | tensorflow/tools/docs/generate_lib_test.py | 29 | 8239 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for doc generator traversal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import generate_lib
from tensorflow.tools.docs import parser
def test_function():
"""Docstring for test_function."""
pass
class TestClass(object):
"""Docstring for TestClass itself."""
class ChildClass(object):
"""Docstring for a child class."""
class GrandChildClass(object):
"""Docstring for a child of a child class."""
pass
class DummyVisitor(object):
def __init__(self, index, duplicate_of):
self.index = index
self.duplicate_of = duplicate_of
class GenerateTest(googletest.TestCase):
def get_test_objects(self):
# These are all mutable objects, so rebuild them for each test.
# Don't cache the objects.
module = sys.modules[__name__]
index = {
'tf': sys, # Can be any module, this test doesn't care about content.
'tf.TestModule': module,
'tf.test_function': test_function,
'tf.TestModule.test_function': test_function,
'tf.TestModule.TestClass': TestClass,
'tf.TestModule.TestClass.ChildClass': TestClass.ChildClass,
'tf.TestModule.TestClass.ChildClass.GrandChildClass':
TestClass.ChildClass.GrandChildClass,
}
tree = {
'tf': ['TestModule', 'test_function'],
'tf.TestModule': ['test_function', 'TestClass'],
'tf.TestModule.TestClass': ['ChildClass'],
'tf.TestModule.TestClass.ChildClass': ['GrandChildClass'],
'tf.TestModule.TestClass.ChildClass.GrandChildClass': []
}
duplicate_of = {'tf.test_function': 'tf.TestModule.test_function'}
duplicates = {
'tf.TestModule.test_function': [
'tf.test_function', 'tf.TestModule.test_function'
]
}
base_dir = os.path.dirname(__file__)
visitor = DummyVisitor(index, duplicate_of)
reference_resolver = parser.ReferenceResolver.from_visitor(
visitor=visitor, doc_index={}, py_module_names=['tf'])
parser_config = parser.ParserConfig(
reference_resolver=reference_resolver,
duplicates=duplicates,
duplicate_of=duplicate_of,
tree=tree,
index=index,
reverse_index={},
guide_index={},
base_dir=base_dir)
return reference_resolver, parser_config
def test_write(self):
_, parser_config = self.get_test_objects()
output_dir = googletest.GetTempDir()
generate_lib.write_docs(output_dir, parser_config, yaml_toc=True,
site_api_path='api_docs/python')
# Check redirects
redirects_file = os.path.join(output_dir, '_redirects.yaml')
self.assertTrue(os.path.exists(redirects_file))
with open(redirects_file) as f:
redirects = f.read()
self.assertEqual(redirects.split(), [
'redirects:', '-', 'from:', '/api_docs/python/tf/test_function', 'to:',
'/api_docs/python/tf/TestModule/test_function'
])
# Make sure that the right files are written to disk.
self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))
self.assertTrue(os.path.exists(os.path.join(output_dir, 'tf.md')))
self.assertTrue(os.path.exists(os.path.join(output_dir, '_toc.yaml')))
self.assertTrue(
os.path.exists(os.path.join(output_dir, 'tf/TestModule.md')))
self.assertFalse(
os.path.exists(os.path.join(output_dir, 'tf/test_function.md')))
self.assertTrue(
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/TestClass.md')))
self.assertTrue(
os.path.exists(
os.path.join(output_dir,
'tf/TestModule/TestClass/ChildClass.md')))
self.assertTrue(
os.path.exists(
os.path.join(
output_dir,
'tf/TestModule/TestClass/ChildClass/GrandChildClass.md')))
# Make sure that duplicates are not written
self.assertTrue(
os.path.exists(
os.path.join(output_dir, 'tf/TestModule/test_function.md')))
def test_update_id_tags_inplace(self):
test_dir = googletest.GetTempDir()
test_sub_dir = os.path.join(test_dir, 'a/b')
os.makedirs(test_sub_dir)
test_path1 = os.path.join(test_dir, 'file1.md')
test_path2 = os.path.join(test_sub_dir, 'file2.md')
test_path3 = os.path.join(test_sub_dir, 'file3.notmd')
with open(test_path1, 'w') as f:
f.write('## abc&123')
with open(test_path2, 'w') as f:
f.write('# A Level 1 Heading\n')
f.write('## A Level 2 Heading')
with open(test_path3, 'w') as f:
f.write("## don\'t change this")
generate_lib.update_id_tags_inplace(test_dir)
with open(test_path1) as f:
content = f.read()
self.assertEqual(content, '<h2 id="abc_123">abc&123</h2>')
with open(test_path2) as f:
content = f.read()
self.assertEqual(
content, '# A Level 1 Heading\n'
'<h2 id="A_Level_2_Heading">A Level 2 Heading</h2>')
with open(test_path3) as f:
content = f.read()
self.assertEqual(content, "## don\'t change this")
def test_replace_refs(self):
test_dir = googletest.GetTempDir()
test_in_dir = os.path.join(test_dir, 'in')
test_in_dir_a = os.path.join(test_dir, 'in/a')
test_in_dir_b = os.path.join(test_dir, 'in/b')
os.makedirs(test_in_dir)
os.makedirs(test_in_dir_a)
os.makedirs(test_in_dir_b)
test_out_dir = os.path.join(test_dir, 'out')
os.makedirs(test_out_dir)
test_path1 = os.path.join(test_in_dir_a, 'file1.md')
test_path2 = os.path.join(test_in_dir_b, 'file2.md')
test_path3 = os.path.join(test_in_dir_b, 'file3.notmd')
test_path4 = os.path.join(test_in_dir_b, 'OWNERS')
with open(test_path1, 'w') as f:
f.write('Use `tf.test_function` to test things.')
with open(test_path2, 'w') as f:
f.write('Use @{tf.TestModule.TestClass.ChildClass} to test things.\n'
"`tf.whatever` doesn't exist")
with open(test_path3, 'w') as f:
file3_content = (
'Not a .md file. Should be copied unchanged:'
'@{tf.TestModule.TestClass.ChildClass}, `tf.test_function`')
f.write(file3_content)
with open(test_path4, 'w') as f:
f.write('')
reference_resolver, _ = self.get_test_objects()
generate_lib.replace_refs(test_in_dir, test_out_dir, reference_resolver,
'*.md')
with open(os.path.join(test_out_dir, 'a/file1.md')) as f:
content = f.read()
self.assertEqual(
content,
'Use <a href="../api_docs/python/tf/TestModule/test_function.md">'
'<code>tf.test_function</code></a> to test things.')
with open(os.path.join(test_out_dir, 'b/file2.md')) as f:
content = f.read()
self.assertEqual(
content,
'Use '
'<a href="../api_docs/python/tf/TestModule/TestClass/ChildClass.md">'
'<code>tf.TestModule.TestClass.ChildClass</code></a> '
'to test things.\n'
'`tf.whatever` doesn\'t exist')
with open(os.path.join(test_out_dir, 'b/file3.notmd')) as f:
content = f.read()
self.assertEqual(content, file3_content)
with self.assertRaises(IOError):
# This should fail. The OWNERS file should not be copied
with open(os.path.join(test_out_dir, 'b/OWNERS')) as f:
content = f.read()
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
synconics/odoo | addons/hr_timesheet_sheet/hr_timesheet_sheet.py | 36 | 36165 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
drop_view_if_exists,
)
from openerp.tools.translate import _
class hr_timesheet_sheet(osv.osv):
_name = "hr_timesheet_sheet.sheet"
_inherit = "mail.thread"
_table = 'hr_timesheet_sheet_sheet'
_order = "id desc"
_description="Timesheet"
def _total(self, cr, uid, ids, name, args, context=None):
""" Compute the attendances, analytic lines timesheets and differences between them
for all the days of a timesheet and the current day
"""
res = dict.fromkeys(ids, {
'total_attendance': 0.0,
'total_timesheet': 0.0,
'total_difference': 0.0,
})
cr.execute("""
SELECT sheet_id as id,
sum(total_attendance) as total_attendance,
sum(total_timesheet) as total_timesheet,
sum(total_difference) as total_difference
FROM hr_timesheet_sheet_sheet_day
WHERE sheet_id IN %s
GROUP BY sheet_id
""", (tuple(ids),))
res.update(dict((x.pop('id'), x) for x in cr.dictfetchall()))
return res
def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
if len(ids_signin) != len(ids_signout):
raise osv.except_osv(_('Warning!'),_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
return True
def copy(self, cr, uid, ids, *args, **argv):
raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
def create(self, cr, uid, vals, context=None):
if 'employee_id' in vals:
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If there are attendances, we sort them by ascending date before writing them, to satisfy the alternation constraint
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'employee_id' in vals:
new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
if not new_user_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
if vals.get('attendances_ids'):
# If there are attendances, we sort them by ascending date before writing them, to satisfy the alternation constraint
# In addition to the date ordering, attendance deletions are written before attendance insertions
vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
if vals.get('attendances_ids'):
for timesheet in self.browse(cr, uid, ids):
if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
return res
def sort_attendances(self, cr, uid, attendance_tuples, context=None):
date_attendances = []
for att_tuple in attendance_tuples:
if att_tuple[0] in [0,1,4]:
if att_tuple[0] in [0,1]:
if att_tuple[2] and att_tuple[2].has_key('name'):
name = att_tuple[2]['name']
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
else:
name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
date_attendances.append((1, name, att_tuple))
elif att_tuple[0] in [2,3]:
date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
else:
date_attendances.append((0, False, att_tuple))
date_attendances.sort()
return [att[2] for att in date_attendances]
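# Hedged illustration of the sort above (hypothetical values, not part of the
# original module): the tuples follow the standard one2many command format, so
# a mixed batch such as
#   [(0, 0, {'name': '2014-01-02 09:00:00'}),
#    (2, 7),
#    (0, 0, {'name': '2014-01-01 08:00:00'})]
# would come back with the deletion command (2, 7) first (its sort key starts
# with 0) and the two creations ordered by their 'name' datetimes afterwards.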
def button_confirm(self, cr, uid, ids, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
di = sheet.user_id.company_id.timesheet_max_difference
if (abs(sheet.total_difference) < di) or not di:
sheet.signal_workflow('confirm')
else:
raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
return True
def attendance_action_change(self, cr, uid, ids, context=None):
hr_employee = self.pool.get('hr.employee')
employee_ids = []
for sheet in self.browse(cr, uid, ids, context=context):
if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
Timesheet = self.pool['hr.analytic.timesheet']
Attendance = self.pool['hr.attendance']
return {
sheet_id: {
'timesheet_activity_count': Timesheet.search_count(cr,uid, [('sheet_id','=', sheet_id)], context=context),
'attendance_count': Attendance.search_count(cr,uid, [('sheet_id', '=', sheet_id)], context=context)
}
for sheet_id in ids
}
_columns = {
'name': fields.char('Note', select=1,
states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
'Timesheet lines',
readonly=True, states={
'draft': [('readonly', False)],
'new': [('readonly', False)]}
),
'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
'state' : fields.selection([
('new', 'New'),
('draft','Open'),
('confirm','Waiting Approval'),
('done','Approved')], 'Status', select=True, required=True, readonly=True,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
\n* The \'Confirmed\' status is used when the timesheet has been confirmed by the user. \
\n* The \'Done\' status is used when the user\'s timesheet has been accepted by his/her senior.'),
'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
'company_id': fields.many2one('res.company', 'Company'),
'department_id':fields.many2one('hr.department','Department'),
'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True),
'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True),
}
def _default_date_from(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return time.strftime('%Y-%m-01')
elif r=='week':
return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-01-01')
return fields.date.context_today(self, cr, uid, context)
def _default_date_to(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
r = user.company_id and user.company_id.timesheet_range or 'month'
if r=='month':
return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
elif r=='week':
return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
elif r=='year':
return time.strftime('%Y-12-31')
return fields.date.context_today(self, cr, uid, context)
def _default_employee(self, cr, uid, context=None):
emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
return emp_ids and emp_ids[0] or False
_defaults = {
'date_from' : _default_date_from,
'date_to' : _default_date_to,
'state': 'new',
'employee_id': _default_employee,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
}
def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
for sheet in self.browse(cr, uid, ids, context=context):
new_user_id = forced_user_id or sheet.employee_id.user_id and sheet.employee_id.user_id.id
if new_user_id:
cr.execute('SELECT id \
FROM hr_timesheet_sheet_sheet \
WHERE (date_from <= %s and %s <= date_to) \
AND user_id=%s \
AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
if cr.fetchall():
return False
return True
_constraints = [
(_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
]
def action_set_to_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft'})
self.create_workflow(cr, uid, ids)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (long, int)):
ids = [ids]
return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
for r in self.read(cr, uid, ids, ['date_from'],
context=context, load='_classic_write')]
def unlink(self, cr, uid, ids, context=None):
sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
for sheet in sheets:
if sheet['state'] in ('confirm', 'done'):
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
elif sheet['total_attendance'] != 0.00:
raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which has attendance entries.'))
toremove = []
analytic_timesheet = self.pool.get('hr.analytic.timesheet')
for sheet in self.browse(cr, uid, ids, context=context):
for timesheet in sheet.timesheet_ids:
toremove.append(timesheet.id)
analytic_timesheet.unlink(cr, uid, toremove, context=context)
return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
department_id = False
user_id = False
if employee_id:
empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
department_id = empl_id.department_id.id
user_id = empl_id.user_id.id
return {'value': {'department_id': department_id, 'user_id': user_id,}}
# ------------------------------------------------
# OpenChatter methods and notifications
# ------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
if not empids:
return False
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
return dom
class account_analytic_line(osv.osv):
_inherit = "account.analytic.line"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
#get the default date (should be: today)
res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
#if we got the dates from and to from the timesheet and if the default date is in between, we use the default
#but if the default isn't included in those dates, we use the start date of the timesheet as default
if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
return res
return context.get('timesheet_date_from')
#if we don't get the dates from the timesheet, we return the default value from super()
return res
class account_analytic_account(osv.osv):
_inherit = "account.analytic.account"
def name_create(self, cr, uid, name, context=None):
if context is None:
context = {}
group_template_required = self.pool['res.users'].has_group(cr, uid, 'account_analytic_analysis.group_template_required')
if not context.get('default_use_timesheets') or group_template_required:
return super(account_analytic_account, self).name_create(cr, uid, name, context=context)
rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
class hr_timesheet_line(osv.osv):
_inherit = "hr.analytic.timesheet"
def _sheet(self, cursor, user, ids, name, args, context=None):
sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
res = {}.fromkeys(ids, False)
for ts_line in self.browse(cursor, user, ids, context=context):
sheet_ids = sheet_obj.search(cursor, user,
[('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
('employee_id.user_id', '=', ts_line.user_id.id),
('state', 'in', ['draft', 'new'])],
context=context)
if sheet_ids:
# [0] because only one sheet possible for an employee between 2 dates
res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
return res
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
ts_line_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT l.id
FROM hr_analytic_timesheet l
INNER JOIN account_analytic_line al
ON (l.line_id = al.id)
WHERE %(date_to)s >= al.date
AND %(date_from)s <= al.date
AND %(user_id)s = al.user_id
GROUP BY l.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
ts_line_ids.extend([row[0] for row in cr.fetchall()])
return ts_line_ids
def _get_account_analytic_line(self, cr, uid, ids, context=None):
ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
return ts_line_ids
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet', select="1",
type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
},
),
}
def write(self, cr, uid, ids, values, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line, self).write(cr, uid, ids, values, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
return True
def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
class hr_attendance(osv.osv):
_inherit = "hr.attendance"
def _get_default_date(self, cr, uid, context=None):
if context is None:
context = {}
if 'name' in context:
return context['name'] + time.strftime(' %H:%M:%S')
return time.strftime('%Y-%m-%d %H:%M:%S')
def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
attendance_ids = []
for ts in self.browse(cr, uid, ids, context=context):
cr.execute("""
SELECT a.id
FROM hr_attendance a
INNER JOIN hr_employee e
INNER JOIN resource_resource r
ON (e.resource_id = r.id)
ON (a.employee_id = e.id)
LEFT JOIN res_users u
ON r.user_id = u.id
LEFT JOIN res_partner p
ON u.partner_id = p.id
WHERE %(date_to)s >= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
AND %(date_from)s <= date_trunc('day', a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))
AND %(user_id)s = r.user_id
GROUP BY a.id""", {'date_from': ts.date_from,
'date_to': ts.date_to,
'user_id': ts.employee_id.user_id.id,})
attendance_ids.extend([row[0] for row in cr.fetchall()])
return attendance_ids
def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
""" Simulate timesheet in employee timezone
Return the attendance date in string format in the employee
tz converted from utc timezone as we consider date of employee
timesheet is in employee timezone
"""
employee_obj = self.pool['hr.employee']
tz = False
if employee_id:
employee = employee_obj.browse(cr, uid, employee_id, context=context)
tz = employee.user_id.partner_id.tz
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz = timezone(tz or 'utc')
attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_dt = pytz.utc.localize(attendance_dt)
att_tz_dt = att_tz_dt.astimezone(att_tz)
# We take only the date, omitting the hours, as we compare it with the
# timesheet's date_from, which is a date field; using the hours would put
# the comparison out of the timesheet's scope
att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
return att_tz_date_str
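# Hedged illustration (hypothetical values, not part of the original module):
# with an employee whose partner tz is 'America/New_York', a UTC attendance of
# '2014-01-01 02:30:00' falls on '2013-12-31' in the employee's timezone, and
# that date string is what gets compared against the sheet's date_from/date_to.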
def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
sheet_obj = self.pool['hr_timesheet_sheet.sheet']
if not date:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, employee_id,
date=date, context=context)
sheet_ids = sheet_obj.search(cr, uid,
[('date_from', '<=', att_tz_date_str),
('date_to', '>=', att_tz_date_str),
('employee_id', '=', employee_id)],
limit=1, context=context)
return sheet_ids and sheet_ids[0] or False
def _sheet(self, cursor, user, ids, name, args, context=None):
res = {}.fromkeys(ids, False)
for attendance in self.browse(cursor, user, ids, context=context):
res[attendance.id] = self._get_current_sheet(
cursor, user, attendance.employee_id.id, attendance.name,
context=context)
return res
_columns = {
'sheet_id': fields.function(_sheet, string='Sheet',
type='many2one', relation='hr_timesheet_sheet.sheet',
store={
'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
},
)
}
_defaults = {
'name': _get_default_date,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
if sheet_id:
att_tz_date_str = self._get_attendance_employee_tz(
cr, uid, vals.get('employee_id'),
date=vals.get('name'), context=context)
ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
if ts.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.'))
return super(hr_attendance,self).create(cr, uid, vals, context=context)
def unlink(self, cr, uid, ids, *args, **kwargs):
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
self._check(cr, uid, ids)
res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
if 'sheet_id' in context:
for attendance in self.browse(cr, uid, ids, context=context):
if context['sheet_id'] != attendance.sheet_id.id:
raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
'date outside the current timesheet dates.'))
return res
def _check(self, cr, uid, ids):
for att in self.browse(cr, uid, ids):
if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
return True
class hr_timesheet_sheet_sheet_day(osv.osv):
_name = "hr_timesheet_sheet.sheet.day"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.date('Date', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
'total_timesheet': fields.float('Total Timesheet', readonly=True),
'total_attendance': fields.float('Attendance', readonly=True),
'total_difference': fields.float('Difference', readonly=True),
}
_depends = {
'account.analytic.line': ['date', 'unit_amount'],
'hr.analytic.timesheet': ['line_id', 'sheet_id'],
'hr.attendance': ['action', 'name', 'sheet_id'],
}
def init(self, cr):
drop_view_if_exists(cr, 'hr_timesheet_sheet_sheet_day')
cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
SELECT
id,
name,
sheet_id,
total_timesheet,
total_attendance,
cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
FROM
((
SELECT
MAX(id) as id,
name,
sheet_id,
timezone,
SUM(total_timesheet) as total_timesheet,
CASE WHEN SUM(orphan_attendances) != 0
THEN (SUM(total_attendance) +
CASE WHEN current_date <> name
THEN 1440
ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC')) * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC' AT TIME ZONE coalesce(timezone, 'UTC'))
END
)
ELSE SUM(total_attendance)
END /60 as total_attendance
FROM
((
select
min(hrt.id) as id,
p.tz as timezone,
l.date::date as name,
s.id as sheet_id,
sum(l.unit_amount) as total_timesheet,
0 as orphan_attendances,
0.0 as total_attendance
from
hr_analytic_timesheet hrt
JOIN account_analytic_line l ON l.id = hrt.line_id
LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
JOIN hr_employee e ON s.employee_id = e.id
JOIN resource_resource r ON e.resource_id = r.id
LEFT JOIN res_users u ON r.user_id = u.id
LEFT JOIN res_partner p ON u.partner_id = p.id
group by l.date::date, s.id, timezone
) union (
select
-min(a.id) as id,
p.tz as timezone,
(a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date as name,
s.id as sheet_id,
0.0 as total_timesheet,
SUM(CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END) as orphan_attendances,
SUM(((EXTRACT(hour FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))) * 60) + EXTRACT(minute FROM (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC')))) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
from
hr_attendance a
LEFT JOIN hr_timesheet_sheet_sheet s
ON s.id = a.sheet_id
JOIN hr_employee e
ON a.employee_id = e.id
JOIN resource_resource r
ON e.resource_id = r.id
LEFT JOIN res_users u
ON r.user_id = u.id
LEFT JOIN res_partner p
ON u.partner_id = p.id
WHERE action in ('sign_in', 'sign_out')
group by (a.name AT TIME ZONE 'UTC' AT TIME ZONE coalesce(p.tz, 'UTC'))::date, s.id, timezone
)) AS foo
GROUP BY name, sheet_id, timezone
)) AS bar""")
class hr_timesheet_sheet_sheet_account(osv.osv):
_name = "hr_timesheet_sheet.sheet.account"
_description = "Timesheets by Period"
_auto = False
_order='name'
_columns = {
'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
'total': fields.float('Total Time', digits=(16,2), readonly=True),
'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
}
_depends = {
'account.analytic.line': ['account_id', 'date', 'to_invoice', 'unit_amount', 'user_id'],
'hr.analytic.timesheet': ['line_id'],
'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'],
}
def init(self, cr):
drop_view_if_exists(cr, 'hr_timesheet_sheet_sheet_account')
cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
select
min(hrt.id) as id,
l.account_id as name,
s.id as sheet_id,
sum(l.unit_amount) as total,
l.to_invoice as invoice_rate
from
hr_analytic_timesheet hrt
left join (account_analytic_line l
LEFT JOIN hr_timesheet_sheet_sheet s
ON (s.date_to >= l.date
AND s.date_from <= l.date
AND s.user_id = l.user_id))
on (l.id = hrt.line_id)
group by l.account_id, s.id, l.to_invoice
)""")
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'timesheet_range': fields.selection(
[('day','Day'),('week','Week'),('month','Month')], 'Timesheet range',
help="Periodicity on which you validate your timesheets."),
'timesheet_max_difference': fields.float('Timesheet allowed difference(Hours)',
help="Allowed difference in hours between the sign in/out and the timesheet " \
"computation for one sheet. Set this to 0 if you do not want any control."),
}
_defaults = {
'timesheet_range': lambda *args: 'week',
'timesheet_max_difference': lambda *args: 0.0
}
class hr_employee(osv.osv):
'''
Employee
'''
_inherit = 'hr.employee'
_description = 'Employee'
def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None):
Sheet = self.pool['hr_timesheet_sheet.sheet']
return {
employee_id: Sheet.search_count(cr,uid, [('employee_id', '=', employee_id)], context=context)
for employee_id in ids
}
_columns = {
'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
karlgluck/heroes-of-the-storm-replay-parser | s2protocol/protocol18574.py | 14 | 20388 | # Copyright (c) 2013 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,6)]), #2
('_int',[(0,14)]), #3
('_int',[(0,22)]), #4
('_int',[(0,32)]), #5
('_choice',[(0,2),{0:('m_uint6',2),1:('m_uint14',3),2:('m_uint22',4),3:('m_uint32',5)}]), #6
('_int',[(0,5)]), #7
('_struct',[[('m_playerId',7,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',5,4),('m_baseBuild',5,5)]]), #11
('_int',[(0,3)]), #12
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',5,3)]]), #13
('_fourcc',[]), #14
('_blob',[(0,7)]), #15
('_int',[(0,64)]), #16
('_struct',[[('m_region',10,0),('m_programId',14,1),('m_realm',5,2),('m_name',15,3),('m_id',16,4)]]), #17
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #18
('_int',[(0,2)]), #19
('_struct',[[('m_name',9,0),('m_toon',17,1),('m_race',9,2),('m_color',18,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',19,7),('m_result',19,8)]]), #20
('_array',[(0,5),20]), #21
('_optional',[21]), #22
('_blob',[(0,10)]), #23
('_blob',[(0,11)]), #24
('_struct',[[('m_file',24,0)]]), #25
('_bool',[]), #26
('_int',[(-9223372036854775808,64)]), #27
('_blob',[(0,12)]), #28
('_blob',[(40,0)]), #29
('_array',[(0,4),29]), #30
('_optional',[30]), #31
('_struct',[[('m_playerList',22,0),('m_title',23,1),('m_difficulty',9,2),('m_thumbnail',25,3),('m_isBlizzardMap',26,4),('m_timeUTC',27,5),('m_timeLocalOffset',27,6),('m_description',28,7),('m_imageFilePath',24,8),('m_mapFileName',24,9),('m_cacheHandles',31,10),('m_miniSave',26,11),('m_gameSpeed',12,12),('m_defaultDifficulty',2,13)]]), #32
('_optional',[10]), #33
('_struct',[[('m_race',33,-1)]]), #34
('_struct',[[('m_team',33,-1)]]), #35
('_struct',[[('m_name',9,-7),('m_randomSeed',5,-6),('m_racePreference',34,-5),('m_teamPreference',35,-4),('m_testMap',26,-3),('m_testAuto',26,-2),('m_observe',19,-1)]]), #36
('_array',[(0,5),36]), #37
('_struct',[[('m_lockTeams',26,-11),('m_teamsTogether',26,-10),('m_advancedSharedControl',26,-9),('m_randomRaces',26,-8),('m_battleNet',26,-7),('m_amm',26,-6),('m_ranked',26,-5),('m_noVictoryOrDefeat',26,-4),('m_fog',19,-3),('m_observers',19,-2),('m_userDifficulty',19,-1)]]), #38
('_int',[(1,4)]), #39
('_int',[(1,8)]), #40
('_bitarray',[(0,6)]), #41
('_bitarray',[(0,8)]), #42
('_bitarray',[(0,2)]), #43
('_struct',[[('m_allowedColors',41,-5),('m_allowedRaces',42,-4),('m_allowedDifficulty',41,-3),('m_allowedControls',42,-2),('m_allowedObserveTypes',43,-1)]]), #44
('_array',[(0,5),44]), #45
('_struct',[[('m_randomValue',5,-23),('m_gameCacheName',23,-22),('m_gameOptions',38,-21),('m_gameSpeed',12,-20),('m_gameType',12,-19),('m_maxUsers',7,-18),('m_maxObservers',7,-17),('m_maxPlayers',7,-16),('m_maxTeams',39,-15),('m_maxColors',2,-14),('m_maxRaces',40,-13),('m_maxControls',40,-12),('m_mapSizeX',10,-11),('m_mapSizeY',10,-10),('m_mapFileSyncChecksum',5,-9),('m_mapFileName',24,-8),('m_mapAuthorName',9,-7),('m_modFileSyncChecksum',5,-6),('m_slotDescriptions',45,-5),('m_defaultDifficulty',2,-4),('m_cacheHandles',30,-3),('m_isBlizzardMap',26,-2),('m_isPremadeFFA',26,-1)]]), #46
('_optional',[1]), #47
('_optional',[7]), #48
('_struct',[[('m_color',48,-1)]]), #49
('_array',[(0,5),5]), #50
('_struct',[[('m_control',10,-10),('m_userId',47,-9),('m_teamId',1,-8),('m_colorPref',49,-7),('m_racePref',34,-6),('m_difficulty',2,-5),('m_handicap',0,-4),('m_observe',19,-3),('m_rewards',50,-2),('m_toonHandle',15,-1)]]), #51
('_array',[(0,5),51]), #52
('_struct',[[('m_phase',12,-9),('m_maxUsers',7,-8),('m_maxObservers',7,-7),('m_slots',52,-6),('m_randomSeed',5,-5),('m_hostUserId',47,-4),('m_isSinglePlayer',26,-3),('m_gameDuration',5,-2),('m_defaultDifficulty',2,-1)]]), #53
('_struct',[[('m_userInitialData',37,-3),('m_gameDescription',46,-2),('m_lobbyState',53,-1)]]), #54
('_struct',[[('m_syncLobbyState',54,-1)]]), #55
('_struct',[[('m_name',15,-1)]]), #56
('_blob',[(0,6)]), #57
('_struct',[[('m_name',57,-1)]]), #58
('_struct',[[('m_name',57,-3),('m_type',5,-2),('m_data',15,-1)]]), #59
('_struct',[[('m_type',5,-3),('m_name',57,-2),('m_data',28,-1)]]), #60
('_array',[(0,5),10]), #61
('_struct',[[('m_signature',61,-1)]]), #62
('_struct',[[('m_developmentCheatsEnabled',26,-4),('m_multiplayerCheatsEnabled',26,-3),('m_syncChecksummingEnabled',26,-2),('m_isMapToMapTransition',26,-1)]]), #63
('_struct',[[]]), #64
('_struct',[[('m_fileName',24,-5),('m_automatic',26,-4),('m_overwrite',26,-3),('m_name',9,-2),('m_description',23,-1)]]), #65
('_int',[(-2147483648,32)]), #66
('_struct',[[('x',66,-2),('y',66,-1)]]), #67
('_struct',[[('m_point',67,-4),('m_time',66,-3),('m_verb',23,-2),('m_arguments',23,-1)]]), #68
('_struct',[[('m_data',68,-1)]]), #69
('_int',[(0,18)]), #70
('_int',[(0,16)]), #71
('_struct',[[('m_abilLink',71,-3),('m_abilCmdIndex',7,-2),('m_abilCmdData',33,-1)]]), #72
('_optional',[72]), #73
('_null',[]), #74
('_int',[(0,20)]), #75
('_struct',[[('x',75,-3),('y',75,-2),('z',66,-1)]]), #76
('_struct',[[('m_targetUnitFlags',10,-6),('m_timer',10,-5),('m_tag',5,-4),('m_snapshotUnitLink',71,-3),('m_snapshotPlayerId',47,-2),('m_snapshotPoint',76,-1)]]), #77
('_choice',[(0,2),{0:('None',74),1:('TargetPoint',76),2:('TargetUnit',77),3:('Data',5)}]), #78
('_optional',[5]), #79
('_struct',[[('m_cmdFlags',70,-4),('m_abil',73,-3),('m_data',78,-2),('m_otherUnit',79,-1)]]), #80
('_array',[(0,8),10]), #81
('_choice',[(0,2),{0:('None',74),1:('Mask',42),2:('OneIndices',81),3:('ZeroIndices',81)}]), #82
('_struct',[[('m_unitLink',71,-3),('m_intraSubgroupPriority',10,-2),('m_count',10,-1)]]), #83
('_array',[(0,8),83]), #84
('_array',[(0,8),5]), #85
('_struct',[[('m_subgroupIndex',10,-4),('m_removeMask',82,-3),('m_addSubgroups',84,-2),('m_addUnitTags',85,-1)]]), #86
('_struct',[[('m_controlGroupId',1,-2),('m_delta',86,-1)]]), #87
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',19,-2),('m_mask',82,-1)]]), #88
('_struct',[[('m_count',10,-6),('m_subgroupCount',10,-5),('m_activeSubgroupIndex',10,-4),('m_unitTagsChecksum',5,-3),('m_subgroupIndicesChecksum',5,-2),('m_subgroupsChecksum',5,-1)]]), #89
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',89,-1)]]), #90
('_array',[(0,3),66]), #91
('_struct',[[('m_recipientId',1,-2),('m_resources',91,-1)]]), #92
('_struct',[[('m_chatMessage',23,-1)]]), #93
('_int',[(-128,8)]), #94
('_struct',[[('x',66,-3),('y',66,-2),('z',66,-1)]]), #95
('_struct',[[('m_beacon',94,-7),('m_ally',94,-6),('m_autocast',94,-5),('m_targetUnitTag',5,-4),('m_targetUnitSnapshotUnitLink',71,-3),('m_targetUnitSnapshotPlayerId',47,-2),('m_targetPoint',95,-1)]]), #96
('_struct',[[('m_speed',12,-1)]]), #97
('_struct',[[('m_delta',94,-1)]]), #98
('_struct',[[('m_verb',23,-2),('m_arguments',23,-1)]]), #99
('_struct',[[('m_alliance',5,-2),('m_control',5,-1)]]), #100
('_struct',[[('m_unitTag',5,-1)]]), #101
('_struct',[[('m_unitTag',5,-2),('m_flags',10,-1)]]), #102
('_struct',[[('m_conversationId',66,-2),('m_replyId',66,-1)]]), #103
('_struct',[[('m_purchaseItemId',66,-1)]]), #104
('_struct',[[('m_difficultyLevel',66,-1)]]), #105
('_choice',[(0,3),{0:('None',74),1:('Checked',26),2:('ValueChanged',5),3:('SelectionChanged',66),4:('TextChanged',24)}]), #106
('_struct',[[('m_controlId',66,-3),('m_eventType',66,-2),('m_eventData',106,-1)]]), #107
('_struct',[[('m_soundHash',5,-2),('m_length',5,-1)]]), #108
('_struct',[[('m_soundHash',85,-2),('m_length',85,-1)]]), #109
('_struct',[[('m_syncInfo',109,-1)]]), #110
('_struct',[[('m_sound',5,-1)]]), #111
('_struct',[[('m_transmissionId',66,-1)]]), #112
('_struct',[[('x',71,-2),('y',71,-1)]]), #113
('_optional',[71]), #114
('_struct',[[('m_target',113,-4),('m_distance',114,-3),('m_pitch',114,-2),('m_yaw',114,-1)]]), #115
('_int',[(0,1)]), #116
('_struct',[[('m_skipType',116,-1)]]), #117
('_int',[(0,11)]), #118
('_struct',[[('x',118,-2),('y',118,-1)]]), #119
('_struct',[[('m_button',5,-4),('m_down',26,-3),('m_posUI',119,-2),('m_posWorld',76,-1)]]), #120
('_struct',[[('m_posUI',119,-2),('m_posWorld',76,-1)]]), #121
('_struct',[[('m_soundtrack',5,-1)]]), #122
('_struct',[[('m_planetId',66,-1)]]), #123
('_struct',[[('m_key',94,-2),('m_flags',94,-1)]]), #124
('_struct',[[('m_resources',91,-1)]]), #125
('_struct',[[('m_fulfillRequestId',66,-1)]]), #126
('_struct',[[('m_cancelRequestId',66,-1)]]), #127
('_struct',[[('m_researchItemId',66,-1)]]), #128
('_struct',[[('m_laggingPlayerId',1,-1)]]), #129
('_struct',[[('m_mercenaryId',66,-1)]]), #130
('_struct',[[('m_battleReportId',66,-2),('m_difficultyLevel',66,-1)]]), #131
('_struct',[[('m_battleReportId',66,-1)]]), #132
('_int',[(0,19)]), #133
('_struct',[[('m_decrementMs',133,-1)]]), #134
('_struct',[[('m_portraitId',66,-1)]]), #135
('_struct',[[('m_functionName',15,-1)]]), #136
('_struct',[[('m_result',66,-1)]]), #137
('_struct',[[('m_gameMenuItemIndex',66,-1)]]), #138
('_struct',[[('m_reason',94,-1)]]), #139
('_struct',[[('m_purchaseCategoryId',66,-1)]]), #140
('_struct',[[('m_button',71,-1)]]), #141
('_struct',[[('m_recipient',19,-2),('m_string',24,-1)]]), #142
('_struct',[[('m_recipient',19,-2),('m_point',67,-1)]]), #143
('_struct',[[('m_progress',66,-1)]]), #144
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (64, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (56, 'NNet.Game.SBankFileEvent'),
8: (58, 'NNet.Game.SBankSectionEvent'),
9: (59, 'NNet.Game.SBankKeyEvent'),
10: (60, 'NNet.Game.SBankValueEvent'),
11: (62, 'NNet.Game.SBankSignatureEvent'),
12: (63, 'NNet.Game.SUserOptionsEvent'),
22: (65, 'NNet.Game.SSaveGameEvent'),
23: (64, 'NNet.Game.SSaveGameDoneEvent'),
25: (64, 'NNet.Game.SPlayerLeaveEvent'),
26: (69, 'NNet.Game.SGameCheatEvent'),
27: (80, 'NNet.Game.SCmdEvent'),
28: (87, 'NNet.Game.SSelectionDeltaEvent'),
29: (88, 'NNet.Game.SControlGroupUpdateEvent'),
30: (90, 'NNet.Game.SSelectionSyncCheckEvent'),
31: (92, 'NNet.Game.SResourceTradeEvent'),
32: (93, 'NNet.Game.STriggerChatMessageEvent'),
33: (96, 'NNet.Game.SAICommunicateEvent'),
34: (97, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (98, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
37: (99, 'NNet.Game.SBroadcastCheatEvent'),
38: (100, 'NNet.Game.SAllianceEvent'),
39: (101, 'NNet.Game.SUnitClickEvent'),
40: (102, 'NNet.Game.SUnitHighlightEvent'),
41: (103, 'NNet.Game.STriggerReplySelectedEvent'),
44: (64, 'NNet.Game.STriggerSkippedEvent'),
45: (108, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (111, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (112, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (112, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (115, 'NNet.Game.SCameraUpdateEvent'),
50: (64, 'NNet.Game.STriggerAbortMissionEvent'),
51: (104, 'NNet.Game.STriggerPurchaseMadeEvent'),
52: (64, 'NNet.Game.STriggerPurchaseExitEvent'),
53: (105, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'),
54: (64, 'NNet.Game.STriggerPlanetPanelCanceledEvent'),
55: (107, 'NNet.Game.STriggerDialogControlEvent'),
56: (110, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (117, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (120, 'NNet.Game.STriggerMouseClickedEvent'),
59: (121, 'NNet.Game.STriggerMouseMovedEvent'),
63: (64, 'NNet.Game.STriggerPlanetPanelReplayEvent'),
64: (122, 'NNet.Game.STriggerSoundtrackDoneEvent'),
65: (123, 'NNet.Game.STriggerPlanetMissionSelectedEvent'),
66: (124, 'NNet.Game.STriggerKeyPressedEvent'),
67: (136, 'NNet.Game.STriggerMovieFunctionEvent'),
68: (64, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'),
69: (64, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'),
70: (125, 'NNet.Game.SResourceRequestEvent'),
71: (126, 'NNet.Game.SResourceRequestFulfillEvent'),
72: (127, 'NNet.Game.SResourceRequestCancelEvent'),
73: (64, 'NNet.Game.STriggerResearchPanelExitEvent'),
74: (64, 'NNet.Game.STriggerResearchPanelPurchaseEvent'),
75: (128, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'),
76: (129, 'NNet.Game.SLagMessageEvent'),
77: (64, 'NNet.Game.STriggerMercenaryPanelExitEvent'),
78: (64, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'),
79: (130, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'),
80: (64, 'NNet.Game.STriggerVictoryPanelExitEvent'),
81: (64, 'NNet.Game.STriggerBattleReportPanelExitEvent'),
82: (131, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'),
83: (132, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'),
84: (132, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'),
85: (105, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'),
86: (64, 'NNet.Game.STriggerMovieStartedEvent'),
87: (64, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (134, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (135, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (137, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (138, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (139, 'NNet.Game.STriggerCameraMoveEvent'),
93: (104, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'),
94: (140, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'),
95: (141, 'NNet.Game.STriggerButtonPressedEvent'),
96: (64, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (142, 'NNet.Game.SChatMessage'),
1: (143, 'NNet.Game.SPingMessage'),
2: (144, 'NNet.Game.SLoadingProgressMessage'),
3: (64, 'NNet.Game.SServerPingMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 6
# The typeid of NNet.Replay.SPlayerId (the type used to encode player ids).
replay_playerid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 13
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 32
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 55
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k,v in value.iteritems():
return v
return 0
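# Illustrative sketch (not part of the generated protocol code): an SVarUint32
# instance decodes to a single-entry dict keyed by whichever width was chosen
# (m_uint6, m_uint14, m_uint22 or m_uint32, see typeinfo 6), and
# _varuint32_value simply returns that numeric value regardless of the key:
#
# >>> _varuint32_value({'m_uint6': 3})
# 3
# >>> _varuint32_value({'m_uint22': 1200})
# 1200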
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_player_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_player_id:
playerid = decoder.instance(replay_playerid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_player_id:
event['_playerid'] = playerid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_player_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_player_id=True):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
count = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip('\x00')
if not scope in attributes['scopes']:
attributes['scopes'][scope] = {}
if not attrid in attributes['scopes'][scope]:
attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
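# Hedged usage sketch: the decode_* functions above only take raw byte strings;
# extracting those strings from a .SC2Replay archive is assumed to be done with
# an external MPQ reader (mpyq is used here purely as an illustration and is
# not imported by this module):
#
# >>> import mpyq
# >>> archive = mpyq.MPQArchive('example.SC2Replay')
# >>> details = decode_replay_details(archive.read_file('replay.details'))
# >>> for event in decode_replay_game_events(
# ...         archive.read_file('replay.game.events')):
# ...     print event['_event'], event['_gameloop']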
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
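# Illustrative sketch of the tag packing above: the low 18 bits hold the
# recycle counter and the bits above them hold the index, so the helpers
# round-trip:
#
# >>> unit_tag(5, 1)
# 1310721
# >>> unit_tag_index(1310721), unit_tag_recycle(1310721)
# (5, 1)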
| mit |
CydarLtd/ansible | lib/ansible/modules/cloud/amazon/ecs_ecr.py | 51 | 11804 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_ecr
version_added: "2.3"
short_description: Manage Elastic Container Registry repositories
description:
- Manage Elastic Container Registry repositories
options:
name:
description:
- the name of the repository
required: true
registry_id:
description:
- AWS account id associated with the registry.
- If not specified, the default registry is assumed.
required: false
policy:
description:
- JSON or dict that represents the new policy
required: false
force_set_policy:
description:
- if no, prevents setting a policy that would prevent you from
setting another policy in the future.
required: false
default: false
delete_policy:
description:
- if yes, remove the policy from the repository
required: false
default: false
state:
description:
- create or destroy the repository
required: false
choices: [present, absent]
default: 'present'
author:
- David M. Lee (@leedm777)
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# If the repository does not exist, it is created. If it does exist, would not
# affect any policies already on it.
- name: ecr-repo
ecs_ecr: name=super/cool
- name: destroy-ecr-repo
ecs_ecr: name=old/busted state=absent
- name: Cross account ecr-repo
ecs_ecr: registry_id=999999999999 name=cross/account
- name: set-policy as object
ecs_ecr:
name: needs-policy-object
policy:
Version: '2008-10-17'
Statement:
- Sid: read-only
Effect: Allow
Principal:
AWS: '{{ read_only_arn }}'
Action:
- ecr:GetDownloadUrlForLayer
- ecr:BatchGetImage
- ecr:BatchCheckLayerAvailability
- name: set-policy as string
ecs_ecr:
name: needs-policy-string
policy: "{{ lookup('template', 'policy.json.j2') }}"
- name: delete-policy
ecs_ecr:
name: needs-no-policy
delete_policy: yes
'''
RETURN = '''
state:
type: string
description: The asserted state of the repository (present, absent)
returned: always
created:
type: boolean
description: If true, the repository was created
returned: always
name:
type: string
description: The name of the repository
returned: "when state == 'absent'"
repository:
type: dict
description: The created or updated repository
returned: "when state == 'present'"
sample:
createdAt: '2017-01-17T08:41:32-06:00'
registryId: '999999999999'
repositoryArn: arn:aws:ecr:us-east-1:999999999999:repository/ecr-test-1484664090
repositoryName: ecr-test-1484664090
repositoryUri: 999999999999.dkr.ecr.us-east-1.amazonaws.com/ecr-test-1484664090
'''
import json
import time
import inspect
import traceback
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def boto_exception(err):
'''boto error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def build_kwargs(registry_id):
"""
Builds a kwargs dict which may contain the optional registryId.
:param registry_id: Optional string containing the registryId.
:return: kwargs dict with registryId, if given
"""
if not registry_id:
return dict()
else:
return dict(registryId=registry_id)
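# Illustrative usage (hypothetical registry id): build_kwargs(None) returns {}
# while build_kwargs('999999999999') returns {'registryId': '999999999999'}, so
# the result can be splatted into the boto3 ECR calls below regardless of whether
# a registry id was supplied, e.g.
#   self.ecr.describe_repositories(repositoryNames=[name], **build_kwargs(registry_id))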
class EcsEcr:
def __init__(self, module):
region, ec2_url, aws_connect_kwargs = \
get_aws_connection_info(module, boto3=True)
self.ecr = boto3_conn(module, conn_type='client',
resource='ecr', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
self.check_mode = module.check_mode
self.changed = False
self.skipped = False
def get_repository(self, registry_id, name):
try:
res = self.ecr.describe_repositories(
repositoryNames=[name], **build_kwargs(registry_id))
repos = res.get('repositories')
return repos and repos[0]
except ClientError as err:
code = err.response['Error'].get('Code', 'Unknown')
if code == 'RepositoryNotFoundException':
return None
raise
def get_repository_policy(self, registry_id, name):
try:
res = self.ecr.get_repository_policy(
repositoryName=name, **build_kwargs(registry_id))
text = res.get('policyText')
return text and json.loads(text)
except ClientError as err:
code = err.response['Error'].get('Code', 'Unknown')
if code == 'RepositoryPolicyNotFoundException':
return None
raise
def create_repository(self, registry_id, name):
if not self.check_mode:
repo = self.ecr.create_repository(
repositoryName=name, **build_kwargs(registry_id)).get(
'repository')
self.changed = True
return repo
else:
self.skipped = True
return dict(repositoryName=name)
def set_repository_policy(self, registry_id, name, policy_text, force):
if not self.check_mode:
policy = self.ecr.set_repository_policy(
repositoryName=name,
policyText=policy_text,
force=force,
**build_kwargs(registry_id))
self.changed = True
return policy
else:
self.skipped = True
if self.get_repository(registry_id, name) is None:
printable = name
if registry_id:
printable = '{}:{}'.format(registry_id, name)
raise Exception(
'could not find repository {}'.format(printable))
return
def delete_repository(self, registry_id, name):
if not self.check_mode:
repo = self.ecr.delete_repository(
repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return repo
else:
repo = self.get_repository(registry_id, name)
if repo:
self.skipped = True
return repo
return None
def delete_repository_policy(self, registry_id, name):
if not self.check_mode:
policy = self.ecr.delete_repository_policy(
repositoryName=name, **build_kwargs(registry_id))
self.changed = True
return policy
else:
policy = self.get_repository_policy(registry_id, name)
if policy:
self.skipped = True
return policy
return None
def run(ecr, params, verbosity):
# type: (EcsEcr, dict, int) -> Tuple[bool, dict]
result = {}
try:
name = params['name']
state = params['state']
policy_text = params['policy']
delete_policy = params['delete_policy']
registry_id = params['registry_id']
force_set_policy = params['force_set_policy']
# If a policy was given, parse it
policy = policy_text and json.loads(policy_text)
result['state'] = state
result['created'] = False
repo = ecr.get_repository(registry_id, name)
if state == 'present':
result['created'] = False
if not repo:
repo = ecr.create_repository(registry_id, name)
result['changed'] = True
result['created'] = True
result['repository'] = repo
if delete_policy:
original_policy = ecr.get_repository_policy(registry_id, name)
if verbosity >= 2:
result['policy'] = None
if verbosity >= 3:
result['original_policy'] = original_policy
if original_policy:
ecr.delete_repository_policy(registry_id, name)
result['changed'] = True
elif policy_text is not None:
try:
policy = sort_json_policy_dict(policy)
if verbosity >= 2:
result['policy'] = policy
original_policy = ecr.get_repository_policy(
registry_id, name)
if original_policy:
original_policy = sort_json_policy_dict(original_policy)
if verbosity >= 3:
result['original_policy'] = original_policy
if original_policy != policy:
ecr.set_repository_policy(
registry_id, name, policy_text, force_set_policy)
result['changed'] = True
except:
# Some failure w/ the policy. It's helpful to know what the
# policy is.
result['policy'] = policy_text
raise
elif state == 'absent':
result['name'] = name
if repo:
ecr.delete_repository(registry_id, name)
result['changed'] = True
except Exception as err:
msg = str(err)
if isinstance(err, ClientError):
msg = boto_exception(err)
result['msg'] = msg
result['exception'] = traceback.format_exc()
return False, result
if ecr.skipped:
result['skipped'] = True
if ecr.changed:
result['changed'] = True
return True, result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
registry_id=dict(required=False),
state=dict(required=False, choices=['present', 'absent'],
default='present'),
force_set_policy=dict(required=False, type='bool', default=False),
policy=dict(required=False, type='json'),
delete_policy=dict(required=False, type='bool')))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['policy', 'delete_policy']])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
ecr = EcsEcr(module)
passed, result = run(ecr, module.params, module._verbosity)
if passed:
module.exit_json(**result)
else:
module.fail_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
wkschwartz/django | tests/db_functions/text/test_lower.py | 71 | 1453 | from django.db.models import CharField
from django.db.models.functions import Lower
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class LowerTests(TestCase):
def test_basic(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(lower_name=Lower('name'))
self.assertQuerysetEqual(
authors.order_by('name'), ['john smith', 'rhonda'],
lambda a: a.lower_name
)
Author.objects.update(name=Lower('name'))
self.assertQuerysetEqual(
authors.order_by('name'), [
('john smith', 'john smith'),
('rhonda', 'rhonda'),
],
lambda a: (a.lower_name, a.name)
)
def test_num_args(self):
with self.assertRaisesMessage(TypeError, "'Lower' takes exactly 1 argument (2 given)"):
Author.objects.update(name=Lower('name', 'name'))
def test_transform(self):
with register_lookup(CharField, Lower):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.filter(name__lower__exact='john smith')
self.assertQuerysetEqual(
authors.order_by('name'), ['John Smith'],
lambda a: a.name
)
| bsd-3-clause |
NCTU-PCCA/NCTU_Yggdrasill | codebook/gentex.py | 1 | 3036 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
doc_class = r'\documentclass [landscape,8pt,a4paper,twocolumn]{article}'
head = '''\\title {NCTU\\_Yggdarsill Codebook}
\\usepackage{parskip}
\\usepackage{xeCJK}
\\setCJKmainfont{SourceHanSerifTW-Light}
\\setmonofont{Courier New}
\\usepackage {listings}
\\usepackage {color}
\\usepackage [left=1.0cm, right=1.0cm, top=2.0cm, bottom=0.5cm]{geometry}
\\definecolor {mygreen}{rgb}{0,0.6,0}
\\definecolor {mygray}{rgb}{0.5,0.5,0.5}
\\definecolor {mymauve}{rgb}{0.58,0,0.82}
\\usepackage{fancyheadings}
\\rhead{\\thepage}
\\chead{初始化?陣列大小?\\texttt{x, y}沒寫反?爆\\texttt{int}?1-based?好,傳囉!}
\\lhead{NCTU\\_Yggdarsill}
\\pagestyle{fancy}
\\cfoot{}
\\setlength{\\headsep}{5pt}
\\setlength{\\textheight}{540pt}
'''
lstset = '''\\lstset {
backgroundcolor=\\color{white},
basicstyle=\\footnotesize\\ttfamily,
breakatwhitespace=false,
breaklines=true,
captionpos=b,
commentstyle=\\color{mygreen},
deletekeywords={...},
escapeinside={\\%*}{*)},
extendedchars=true,
frame=single,
keepspaces=true,
keywordstyle=\\color{blue},
language=Octave,
morekeywords={*,...},
numbers=left,
numbersep=4pt,
numberstyle=\\scriptsize\\ttfamily\\color{mygray},
rulecolor=\\color{black},
showspaces=false,
showstringspaces=false,
showtabs=false,
stepnumber=1,
stringstyle=\\color{mymauve},
tabsize=2,
xleftmargin=15pt,
framexleftmargin=15pt,
framexrightmargin=0pt,
framexbottommargin=0pt,
framextopmargin=0pt,
}
'''
doc_head = '''\\begin {document}
\\thispagestyle{fancy}
{ \\Huge NCTU\\_Yggdarsill}
\\tableofcontents
'''
tail = '''
\\input{note.tex}
\\end{document}
'''
src_types = ['.c', '.cpp', '.java', '.py', '.sublime-build', '.sublime-settings', '.sh']
lang_list = {
'.c': 'c',
'.cpp': 'c++',
'.java': 'java',
'.py': 'python',
'.sublime-build': 'bash',
'.sublime-settings': 'bash',
'.sh': 'bash'
}
import os
import sys
def escape(s):
spe = '#$%&_{}'
for c in spe:
s = s.replace(c, '\\'+c)
return s
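# Illustrative example (hypothetical input): escape('dp_tree#2') returns
# 'dp\_tree\#2', i.e. each TeX special character is prefixed with a backslash.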
def capitalize(line):
return ' '.join(s[0].upper() + s[1:] for s in line.split(' '))
outname = 'codebook.tex'
sys.stdout = open(outname, 'w')
print(doc_class)
print(head)
print(lstset)
print(doc_head)
for root, dirs, files in os.walk('code'):
if root.find('.svn') >= 0:
continue
secname = os.path.basename(root)
if secname.startswith('.'):
continue
if escape(secname) == 'code':
continue
section_name = capitalize(secname.replace('_', ' '))
print(f'\\section{{{section_name}}}')
for name in files:
base, ext = os.path.splitext(name)
if name != 'vimrc' and ext not in src_types:
continue
subsection_name = capitalize(base.replace('_', ' '))
print(f'\\subsection{{{subsection_name}}}')
if ext not in src_types:
lang = 'bash'
else:
lang = lang_list[ext]
print(f'\\lstinputlisting [language={lang}] {{\"code/{escape(secname)}/{name}\"}}')
print(tail)
sys.stdout.flush()
# subprocess.call(['bg5pdflatex', outname])
| mit |
sjebbara/gensim | gensim/interfaces.py | 35 | 10708 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module contains basic interfaces used throughout the whole gensim package.
The interfaces are realized as abstract base classes (i.e., some optional functionality
is provided in the interface itself, so that the interfaces can be subclassed).
"""
from __future__ import with_statement
import logging
import itertools
from gensim import utils, matutils
from six.moves import xrange
logger = logging.getLogger('gensim.interfaces')
class CorpusABC(utils.SaveLoad):
"""
Interface (abstract base class) for corpora. A *corpus* is simply an iterable,
where each iteration step yields one document:
>>> for doc in corpus:
>>> # do something with the doc...
A document is a sequence of `(fieldId, fieldValue)` 2-tuples:
>>> for attr_id, attr_value in doc:
>>> # do something with the attribute
Note that although a default :func:`len` method is provided, it is very inefficient
(performs a linear scan through the corpus to determine its length). Wherever
the corpus size is needed and known in advance (or at least doesn't change so
that it can be cached), the :func:`len` method should be overridden.
See the :mod:`gensim.corpora.svmlightcorpus` module for an example of a corpus.
Saving the corpus with the `save` method (inherited from `utils.SaveLoad`) will
only store the *in-memory* (binary, pickled) object representation=the stream
state, and **not** the documents themselves. See the `save_corpus` static method
for serializing the actual stream content.
"""
def __iter__(self):
"""
Iterate over the corpus, yielding one document at a time.
"""
raise NotImplementedError('cannot instantiate abstract base class')
def save(self, *args, **kwargs):
import warnings
warnings.warn("corpus.save() stores only the (tiny) iteration object; "
"to serialize the actual corpus content, use e.g. MmCorpus.serialize(corpus)")
super(CorpusABC, self).save(*args, **kwargs)
def __len__(self):
"""
Return the number of documents in the corpus.
This method is just the least common denominator and should really be
overridden when possible.
"""
raise NotImplementedError("must override __len__() before calling len(corpus)")
# logger.warning("performing full corpus scan to determine its length; was this intended?")
# return sum(1 for doc in self) # sum(empty generator) == 0, so this works even for an empty corpus
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""
Save an existing `corpus` to disk.
Some formats also support saving the dictionary (`feature_id->word` mapping),
which can in this case be provided by the optional `id2word` parameter.
>>> MmCorpus.save_corpus('file.mm', corpus)
Some corpora also support an index of where each document begins, so
that the documents on disk can be accessed in O(1) time (see the
`corpora.IndexedCorpus` base class). In this case, `save_corpus` is automatically
called internally by `serialize`, which does `save_corpus` plus saves the index
at the same time, so you want to store the corpus with::
>>> MmCorpus.serialize('file.mm', corpus) # stores index as well, allowing random access to individual documents
Calling `serialize()` is preferred to calling `save_corpus()`.
"""
raise NotImplementedError('cannot instantiate abstract base class')
# example code:
logger.info("converting corpus to ??? format: %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
for doc in corpus: # iterate over the document stream
fmt = str(doc) # format the document appropriately...
fout.write(utils.to_utf8("%s\n" % fmt)) # serialize the formatted document to disk
#endclass CorpusABC
class TransformedCorpus(CorpusABC):
def __init__(self, obj, corpus, chunksize=None):
self.obj, self.corpus, self.chunksize = obj, corpus, chunksize
self.metadata = False
def __len__(self):
return len(self.corpus)
def __iter__(self):
if self.chunksize:
for chunk in utils.grouper(self.corpus, self.chunksize):
for transformed in self.obj.__getitem__(chunk, chunksize=None):
yield transformed
else:
for doc in self.corpus:
yield self.obj[doc]
def __getitem__(self, docno):
if hasattr(self.corpus, '__getitem__'):
return self.obj[self.corpus[docno]]
else:
raise RuntimeError('Type {} does not support slicing.'.format(type(self.corpus)))
#endclass TransformedCorpus
class TransformationABC(utils.SaveLoad):
"""
Interface for transformations. A 'transformation' is any object which accepts
a sparse document via the dictionary notation `[]` and returns another sparse
document in its stead::
>>> transformed_doc = transformation[doc]
or also::
>>> transformed_corpus = transformation[corpus]
See the :mod:`gensim.models.tfidfmodel` module for an example of a transformation.
"""
def __getitem__(self, vec):
"""
Transform vector from one vector space into another
**or**
Transform a whole corpus into another.
"""
raise NotImplementedError('cannot instantiate abstract base class')
def _apply(self, corpus, chunksize=None):
"""
Apply the transformation to a whole corpus (as opposed to a single document)
and return the result as another corpus.
"""
return TransformedCorpus(self, corpus, chunksize)
#endclass TransformationABC
class SimilarityABC(utils.SaveLoad):
"""
Abstract interface for similarity searches over a corpus.
In all instances, there is a corpus against which we want to perform the
similarity search.
For each similarity search, the input is a document and the output are its
similarities to individual corpus documents.
Similarity queries are realized by calling ``self[query_document]``.
There is also a convenience wrapper, where iterating over `self` yields
similarities of each document in the corpus against the whole corpus (i.e.,
the query is each corpus document in turn).
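    An illustrative sketch (`index` stands for a hypothetical instance of a
    concrete subclass such as MatrixSimilarity):
    >>> sims = index[query_document]
    >>> # sims now holds similarities of query_document to every indexed document
    >>> for sims in index:
    >>> # each iteration yields one indexed document's similarities to the whole index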
"""
def __init__(self, corpus):
raise NotImplementedError("cannot instantiate Abstract Base Class")
def get_similarities(self, doc):
# (Sparse)MatrixSimilarity override this method so that they both use the
# same __getitem__ method, defined below
raise NotImplementedError("cannot instantiate Abstract Base Class")
def __getitem__(self, query):
"""Get similarities of document `query` to all documents in the corpus.
**or**
If `query` is a corpus (iterable of documents), return a matrix of similarities
        of all query documents vs. all corpus documents. Using this type of batch
query is more efficient than computing the similarities one document after
another.
"""
is_corpus, query = utils.is_corpus(query)
if self.normalize:
# self.normalize only works if the input is a plain gensim vector/corpus (as
# advertised in the doc). in fact, input can be a numpy or scipy.sparse matrix
# as well, but in that case assume tricks are happening and don't normalize
# anything (self.normalize has no effect).
if matutils.ismatrix(query):
import warnings
# warnings.warn("non-gensim input must already come normalized")
else:
if is_corpus:
query = [matutils.unitvec(v) for v in query]
else:
query = matutils.unitvec(query)
result = self.get_similarities(query)
if self.num_best is None:
return result
# if the input query was a corpus (=more documents), compute the top-n
# most similar for each document in turn
if matutils.ismatrix(result):
return [matutils.full2sparse_clipped(v, self.num_best) for v in result]
else:
# otherwise, return top-n of the single input document
return matutils.full2sparse_clipped(result, self.num_best)
def __iter__(self):
"""
For each index document, compute cosine similarity against all other
documents in the index and yield the result.
"""
# turn off query normalization (vectors in the index are assumed to be already normalized)
norm = self.normalize
self.normalize = False
# Try to compute similarities in bigger chunks of documents (not
# one query = a single document after another). The point is, a
# bigger query of N documents is faster than N small queries of one
# document.
#
# After computing similarities of the bigger query in `self[chunk]`,
# yield the resulting similarities one after another, so that it looks
# exactly the same as if they had been computed with many small queries.
try:
chunking = self.chunksize > 1
except AttributeError:
# chunking not supported; fall back to the (slower) mode of 1 query=1 document
chunking = False
if chunking:
# assumes `self.corpus` holds the index as a 2-d numpy array.
# this is true for MatrixSimilarity and SparseMatrixSimilarity, but
# may not be true for other (future) classes..?
for chunk_start in xrange(0, self.index.shape[0], self.chunksize):
# scipy.sparse doesn't allow slicing beyond real size of the matrix
# (unlike numpy). so, clip the end of the chunk explicitly to make
# scipy.sparse happy
chunk_end = min(self.index.shape[0], chunk_start + self.chunksize)
chunk = self.index[chunk_start : chunk_end]
if chunk.shape[0] > 1:
for sim in self[chunk]:
yield sim
else:
yield self[chunk]
else:
for doc in self.index:
yield self[doc]
# restore old normalization value
self.normalize = norm
#endclass SimilarityABC
| gpl-3.0 |
system1357/pdk7105-3.4 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
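# Illustrative sketch (hypothetical values, not part of the original script):
# for the "blocked" test opcode ["M", "eq", 2] with arg "0", analyse("12", ...)
# takes digit position 0 of the mutex status (12 % 10 == 2), compares it to 2
# and returns 1.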
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
manjunaths/tensorflow | tensorflow/python/ops/random_ops.py | 14 | 17940 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating random numbers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_random_ops import *
# pylint: enable=wildcard-import
def _ShapeTensor(shape):
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
if isinstance(shape, (tuple, list)) and not shape:
dtype = dtypes.int32
else:
dtype = None
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
# pylint: disable=protected-access
def random_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a normal distribution.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random normal values.
"""
with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._random_standard_normal(shape_tensor,
dtype,
seed=seed1,
seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NotDifferentiable("RandomStandardNormal")
def parameterized_truncated_normal(shape,
means=0.0,
stddevs=1.0,
minvals=-2.0,
maxvals=2.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
means: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddevs: A 0-D Tensor or Python value of type `dtype`. The standard
deviation of the truncated normal distribution.
minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of
the truncated normal distribution.
maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of
the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "parameterized_truncated_normal",
[shape, means, stddevs, minvals, maxvals]) as name:
shape_tensor = _ShapeTensor(shape)
means_tensor = ops.convert_to_tensor(means, dtype=dtype, name="means")
stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name="stddevs")
minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name="minvals")
maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name="maxvals")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._parameterized_truncated_normal(shape_tensor,
means_tensor,
stddevs_tensor,
minvals_tensor,
maxvals_tensor,
seed=seed1,
seed2=seed2)
return rnd
def truncated_normal(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a truncated normal distribution.
The generated values follow a normal distribution with specified mean and
standard deviation, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
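  Example (an illustrative sketch, not part of the original docstring):
  ```python
  # Hypothetical weight initialization; every value lies within two standard
  # deviations of the mean.
  weights = tf.truncated_normal([784, 256], mean=0.0, stddev=0.1)
  ```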
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
truncated normal distribution.
stddev: A 0-D Tensor or Python value of type `dtype`. The standard deviation
of the truncated normal distribution.
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random truncated normal values.
"""
with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name:
shape_tensor = _ShapeTensor(shape)
mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
seed1, seed2 = random_seed.get_seed(seed)
rnd = gen_random_ops._truncated_normal(shape_tensor,
dtype,
seed=seed1,
seed2=seed2)
mul = rnd * stddev_tensor
value = math_ops.add(mul, mean_tensor, name=name)
return value
ops.NotDifferentiable("ParameterizedTruncatedNormal")
ops.NotDifferentiable("TruncatedNormal")
def random_uniform(shape,
minval=0,
maxval=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Outputs random values from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range, while
the upper bound `maxval` is excluded.
For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
be specified explicitly.
In the integer case, the random integers are slightly biased unless
`maxval - minval` is an exact power of two. The bias is small for values of
`maxval - minval` significantly smaller than the range of the output (either
`2**32` or `2**64`).
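  Example (an illustrative sketch, not part of the original docstring):
  ```python
  # Floats drawn uniformly from [0, 1):
  u = tf.random_uniform([3])
  # Integers drawn uniformly from [0, 10); maxval is required for integer dtypes:
  d = tf.random_uniform([3], minval=0, maxval=10, dtype=tf.int32)
  ```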
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output tensor.
minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on the
range of random values to generate. Defaults to 0.
maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
the range of random values to generate. Defaults to 1 if `dtype` is
floating point.
dtype: The type of the output: `float32`, `float64`, `int32`, or `int64`.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of the specified shape filled with random uniform values.
Raises:
ValueError: If `dtype` is integral and `maxval` is not specified.
"""
dtype = dtypes.as_dtype(dtype)
if maxval is None:
if dtype.is_integer:
raise ValueError("Must specify maxval for integer dtype %r" % dtype)
maxval = 1
with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name:
shape = _ShapeTensor(shape)
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
seed1, seed2 = random_seed.get_seed(seed)
if dtype.is_integer:
return gen_random_ops._random_uniform_int(shape,
minval,
maxval,
seed=seed1,
seed2=seed2,
name=name)
else:
rnd = gen_random_ops._random_uniform(shape,
dtype,
seed=seed1,
seed2=seed2)
return math_ops.add(rnd * (maxval - minval), minval, name=name)
ops.NotDifferentiable("RandomUniform")
def random_shuffle(value, seed=None, name=None):
"""Randomly shuffles a tensor along its first dimension.
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
```python
[[1, 2], [[5, 6],
[3, 4], ==> [1, 2],
[5, 6]] [3, 4]]
```
Args:
value: A Tensor to be shuffled.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for the operation (optional).
Returns:
A tensor of same shape and type as `value`, shuffled along its first
dimension.
"""
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops._random_shuffle(value,
seed=seed1,
seed2=seed2,
name=name)
def random_crop(value, size, seed=None, name=None):
"""Randomly crops a tensor to a given size.
Slices a shape `size` portion out of `value` at a uniformly chosen offset.
Requires `value.shape >= size`.
If a dimension should not be cropped, pass the full size of that dimension.
For example, RGB images can be cropped with
`size = [crop_height, crop_width, 3]`.
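  Example (an illustrative sketch; `image` is a hypothetical tensor of shape
  [height, width, 3]):
  ```python
  cropped = tf.random_crop(image, [224, 224, 3], seed=1)
  ```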
Args:
value: Input tensor to crop.
size: 1-D tensor with size the rank of `value`.
seed: Python integer. Used to create a random seed. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: A name for this operation (optional).
Returns:
A cropped tensor of the same rank as `value` and shape `size`.
"""
# TODO(shlens): Implement edge case to guarantee output size dimensions.
# If size > value.shape, zero pad the result so that it always has shape
# exactly size.
with ops.name_scope(name, "random_crop", [value, size]) as name:
value = ops.convert_to_tensor(value, name="value")
size = ops.convert_to_tensor(size, dtype=dtypes.int32, name="size")
shape = array_ops.shape(value)
check = control_flow_ops.Assert(
math_ops.reduce_all(shape >= size),
["Need value.shape >= size, got ", shape, size])
shape = control_flow_ops.with_dependencies([check], shape)
limit = shape - size + 1
offset = random_uniform(
array_ops.shape(shape),
dtype=size.dtype,
maxval=size.dtype.max,
seed=seed) % limit
return array_ops.slice(value, offset, size, name=name)
def multinomial(logits, num_samples, seed=None, name=None):
"""Draws samples from a multinomial distribution.
Example:
```python
# samples has shape [1, 5], where each value is either 0 or 1 with equal
# probability.
samples = tf.multinomial(tf.log([[10., 10.]]), 5)
```
Args:
logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice
`[i, :]` represents the unnormalized log probabilities for all classes.
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A Python integer. Used to create a random seed for the distribution.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: Optional name for the operation.
Returns:
The drawn samples of shape `[batch_size, num_samples]`.
"""
with ops.name_scope(name, "multinomial", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops.multinomial(logits,
num_samples,
seed=seed1,
seed2=seed2)
ops.NotDifferentiable("Multinomial")
def random_gamma(shape,
alpha,
beta=None,
dtype=dtypes.float32,
seed=None,
name=None):
"""Draws `shape` samples from each of the given Gamma distribution(s).
`alpha` is the shape parameter describing the distribution(s), and `beta` is
the inverse scale parameter(s).
Example:
samples = tf.random_gamma([10], [0.5, 1.5])
# samples has shape [10, 2], where each slice [:, 0] and [:, 1] represents
# the samples drawn from each distribution
samples = tf.random_gamma([7, 5], [0.5, 1.5])
# samples has shape [7, 5, 2], where each slice [:, :, 0] and [:, :, 1]
# represents the 7x5 samples drawn from each of the two distributions
samples = tf.random_gamma([30], [[1.],[3.],[5.]], beta=[[3., 4.]])
# samples has shape [30, 3, 2], with 30 samples each of 3x2 distributions.
Note that for small alpha values, there is a chance you will draw a value of
exactly 0, which gets worse for lower-precision dtypes, even though zero is
not in the support of the gamma distribution.
  Relevant cdfs (~chance you will draw an exactly-0 value):
```
stats.gamma(.01).cdf(np.finfo(np.float16).tiny)
0.91269738769897879
stats.gamma(.01).cdf(np.finfo(np.float32).tiny)
0.41992668622045726
stats.gamma(.01).cdf(np.finfo(np.float64).tiny)
0.00084322740680686662
stats.gamma(.35).cdf(np.finfo(np.float16).tiny)
0.037583276135263931
stats.gamma(.35).cdf(np.finfo(np.float32).tiny)
5.9514895726818067e-14
stats.gamma(.35).cdf(np.finfo(np.float64).tiny)
2.3529843400647272e-108
```
Args:
shape: A 1-D integer Tensor or Python array. The shape of the output samples
to be drawn per alpha/beta-parameterized distribution.
alpha: A Tensor or Python value or N-D array of type `dtype`. `alpha`
provides the shape parameter(s) describing the gamma distribution(s) to
sample. Must be broadcastable with `beta`.
beta: A Tensor or Python value or N-D array of type `dtype`. Defaults to 1.
`beta` provides the inverse scale parameter(s) of the gamma
distribution(s) to sample. Must be broadcastable with `alpha`.
dtype: The type of alpha, beta, and the output: `float16`, `float32`, or
`float64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
for behavior.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `tf.concat(shape, tf.shape(alpha + beta))`
with values of type `dtype`.
"""
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
alpha = ops.convert_to_tensor(alpha, name="alpha", dtype=dtype)
beta = ops.convert_to_tensor(beta if beta is not None else 1,
name="beta",
dtype=dtype)
alpha_broadcast = alpha + array_ops.zeros_like(beta)
seed1, seed2 = random_seed.get_seed(seed)
return gen_random_ops._random_gamma(shape,
alpha_broadcast,
seed=seed1,
seed2=seed2) / beta
ops.NotDifferentiable("RandomGamma")
| apache-2.0 |
yeyanchao/calibre | src/calibre/gui2/convert/xpath_wizard_ui.py | 1 | 4085 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/convert/xpath_wizard.ui'
#
# Created: Thu Oct 25 16:54:55 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(400, 381)
Form.setWindowTitle(_("Form"))
self.verticalLayout = QtGui.QVBoxLayout(Form)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(Form)
self.label.setText(_("Match HTML &tags with tag name:"))
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.tag = QtGui.QComboBox(Form)
self.tag.setEditable(True)
self.tag.setObjectName(_fromUtf8("tag"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(0, _("*"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(1, _("a"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(2, _("br"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(3, _("div"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(4, _("h1"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(5, _("h2"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(6, _("h3"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(7, _("h4"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(8, _("h5"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(9, _("h6"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(10, _("hr"))
self.tag.addItem(_fromUtf8(""))
self.tag.setItemText(11, _("span"))
self.verticalLayout.addWidget(self.tag)
self.label_2 = QtGui.QLabel(Form)
self.label_2.setText(_("Having the &attribute:"))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout.addWidget(self.label_2)
self.attribute = QtGui.QLineEdit(Form)
self.attribute.setObjectName(_fromUtf8("attribute"))
self.verticalLayout.addWidget(self.attribute)
self.label_3 = QtGui.QLabel(Form)
self.label_3.setText(_("With &value:"))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout.addWidget(self.label_3)
self.label_4 = QtGui.QLabel(Form)
self.label_4.setText(_("(A regular expression)"))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.verticalLayout.addWidget(self.label_4)
self.value = QtGui.QLineEdit(Form)
self.value.setObjectName(_fromUtf8("value"))
self.verticalLayout.addWidget(self.value)
self.label_5 = QtGui.QLabel(Form)
self.label_5.setText(_("<p>For example, to match all h2 tags that have class=\"chapter\", set tag to <i>h2</i>, attribute to <i>class</i> and value to <i>chapter</i>.</p><p>Leaving attribute blank will match any attribute and leaving value blank will match any value. Setting tag to * will match any tag.</p><p>To learn more advanced usage of XPath see the <a href=\"http://manual.calibre-ebook.com/xpath.html\">XPath Tutorial</a>."))
self.label_5.setWordWrap(True)
self.label_5.setOpenExternalLinks(True)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.verticalLayout.addWidget(self.label_5)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.label.setBuddy(self.tag)
self.label_2.setBuddy(self.attribute)
self.label_3.setBuddy(self.value)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
pass
| gpl-3.0 |
guorendong/iridium-browser-ubuntu | tools/gyp/test/rules-rebuild/gyptest-default.py | 345 | 2242 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a rule that generates multiple outputs rebuilds
correctly when the inputs change.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default')
test.run_gyp('same_target.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in!
Hello from prog2.in!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.sleep()
contents = test.read(['relocate', 'src', 'prog1.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog1.in'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.sleep()
contents = test.read(['relocate', 'src', 'prog2.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog2.in'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in AGAIN!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
# Test that modifying a rule's inputs (specifically, make-sources.py) causes
# the targets to be built.
test.sleep()
contents = test.read(['relocate', 'src', 'make-sources.py'])
contents = contents.replace('%s', 'the amazing %s')
test.write(['relocate', 'src', 'make-sources.py'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from the amazing prog1.in AGAIN!
Hello from the amazing prog2.in AGAIN!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.pass_test()
| bsd-3-clause |
kionz/librime | thirdparty/src/yaml-cpp/test/gmock-1.7.0/scripts/generator/cpp/gmock_class.py | 268 | 8241 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Google Mock classes from base classes.
This program will read in a C++ source file and output the Google Mock
classes for the specified classes. If no class is specified, all
classes in the source file are emitted.
Usage:
gmock_class.py header-file.h [ClassName]...
Output is sent to stdout.
"""
__author__ = '[email protected] (Neal Norwitz)'
import os
import re
import sys
from cpp import ast
from cpp import utils
# Preserve compatibility with Python 2.3.
try:
_dummy = set
except NameError:
import sets
set = sets.Set
_VERSION = (1, 0, 1) # The version of this script.
# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def _GenerateMethods(output_lines, source, class_node):
function_type = ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL
ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
indent = ' ' * _INDENT
for node in class_node.body:
# We only care about virtual functions.
if (isinstance(node, ast.Function) and
node.modifiers & function_type and
not node.modifiers & ctor_or_dtor):
# Pick out all the elements we need from the original function.
const = ''
if node.modifiers & ast.FUNCTION_CONST:
const = 'CONST_'
return_type = 'void'
if node.return_type:
# Add modifiers like 'const'.
modifiers = ''
if node.return_type.modifiers:
modifiers = ' '.join(node.return_type.modifiers) + ' '
return_type = modifiers + node.return_type.name
template_args = [arg.name for arg in node.return_type.templated_types]
if template_args:
return_type += '<' + ', '.join(template_args) + '>'
if len(template_args) > 1:
for line in [
'// The following line won\'t really compile, as the return',
'// type has multiple template arguments. To fix it, use a',
'// typedef for the return type.']:
output_lines.append(indent + line)
if node.return_type.pointer:
return_type += '*'
if node.return_type.reference:
return_type += '&'
num_parameters = len(node.parameters)
if len(node.parameters) == 1:
first_param = node.parameters[0]
if source[first_param.start:first_param.end].strip() == 'void':
# We must treat T(void) as a function with no parameters.
num_parameters = 0
tmpl = ''
if class_node.templated_types:
tmpl = '_T'
mock_method_macro = 'MOCK_%sMETHOD%d%s' % (const, num_parameters, tmpl)
args = ''
if node.parameters:
# Due to the parser limitations, it is impossible to keep comments
# while stripping the default parameters. When defaults are
# present, we choose to strip them and comments (and produce
# compilable code).
# TODO([email protected]): Investigate whether it is possible to
# preserve parameter name when reconstructing parameter text from
# the AST.
if len([param for param in node.parameters if param.default]) > 0:
args = ', '.join(param.type.name for param in node.parameters)
else:
# Get the full text of the parameters from the start
# of the first parameter to the end of the last parameter.
start = node.parameters[0].start
end = node.parameters[-1].end
# Remove // comments.
args_strings = re.sub(r'//.*', '', source[start:end])
# Condense multiple spaces and eliminate newlines putting the
# parameters together on a single line. Ensure there is a
# space in an argument which is split by a newline without
# intervening whitespace, e.g.: int\nBar
args = re.sub(' +', ' ', args_strings.replace('\n', ' '))
# Create the mock method definition.
output_lines.extend(['%s%s(%s,' % (indent, mock_method_macro, node.name),
'%s%s(%s));' % (indent*3, return_type, args)])
def _GenerateMocks(filename, source, ast_list, desired_class_names):
processed_class_names = set()
lines = []
for node in ast_list:
if (isinstance(node, ast.Class) and node.body and
# desired_class_names being None means that all classes are selected.
(not desired_class_names or node.name in desired_class_names)):
class_name = node.name
parent_name = class_name
processed_class_names.add(class_name)
class_node = node
# Add namespace before the class.
if class_node.namespace:
lines.extend(['namespace %s {' % n for n in class_node.namespace]) # }
lines.append('')
# Add template args for templated classes.
if class_node.templated_types:
# TODO(paulchang): The AST doesn't preserve template argument order,
# so we have to make up names here.
# TODO(paulchang): Handle non-type template arguments (e.g.
# template<typename T, int N>).
template_arg_count = len(class_node.templated_types.keys())
template_args = ['T%d' % n for n in range(template_arg_count)]
template_decls = ['typename ' + arg for arg in template_args]
lines.append('template <' + ', '.join(template_decls) + '>')
parent_name += '<' + ', '.join(template_args) + '>'
# Add the class prolog.
lines.append('class Mock%s : public %s {' # }
% (class_name, parent_name))
lines.append('%spublic:' % (' ' * (_INDENT // 2)))
# Add all the methods.
_GenerateMethods(lines, source, class_node)
# Close the class.
if lines:
# If there are no virtual methods, no need for a public label.
if len(lines) == 2:
del lines[-1]
# Only close the class if there really is a class.
lines.append('};')
lines.append('') # Add an extra newline.
# Close the namespace.
if class_node.namespace:
for i in range(len(class_node.namespace)-1, -1, -1):
lines.append('} // namespace %s' % class_node.namespace[i])
lines.append('') # Add an extra newline.
if desired_class_names:
missing_class_name_list = list(desired_class_names - processed_class_names)
if missing_class_name_list:
missing_class_name_list.sort()
sys.stderr.write('Class(es) not found in %s: %s\n' %
(filename, ', '.join(missing_class_name_list)))
elif not processed_class_names:
sys.stderr.write('No class found in %s\n' % filename)
return lines
def main(argv=sys.argv):
if len(argv) < 2:
sys.stderr.write('Google Mock Class Generator v%s\n\n' %
'.'.join(map(str, _VERSION)))
sys.stderr.write(__doc__)
return 1
global _INDENT
try:
_INDENT = int(os.environ['INDENT'])
except KeyError:
pass
except:
sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))
filename = argv[1]
desired_class_names = None # None means all classes in the source file.
if len(argv) >= 3:
desired_class_names = set(argv[2:])
source = utils.ReadFile(filename)
if source is None:
return 1
builder = ast.BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# An error message was already printed since we couldn't parse.
pass
else:
lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
sys.stdout.write('\n'.join(lines))
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
jhutar/spacewalk | backend/cdn_tools/repository.py | 1 | 19265 | # Copyright (c) 2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
import json
from spacewalk.server import rhnSQL
from spacewalk.server.importlib.backendOracle import SQLBackend
from spacewalk.server.importlib.contentSourcesImport import ContentSourcesImport
from spacewalk.satellite_tools.satCerts import verify_certificate_dates
from spacewalk.satellite_tools.syncLib import log, log2
from spacewalk.server.importlib.importLib import ContentSource, ContentSourceSsl
import constants
class CdnRepositoryManager(object):
"""Class managing CDN repositories, connected channels etc."""
def __init__(self, local_mount_point=None, client_cert_id=None):
rhnSQL.initDB()
self.local_mount_point = local_mount_point
self.repository_tree = CdnRepositoryTree()
self._populate_repository_tree(client_cert_id=client_cert_id)
f = None
try:
try:
# Channel to repositories mapping
f = open(constants.CONTENT_SOURCE_MAPPING_PATH, 'r')
self.content_source_mapping = json.load(f)
f.close()
# Channel to kickstart repositories mapping
f = open(constants.KICKSTART_SOURCE_MAPPING_PATH, 'r')
self.kickstart_source_mapping = json.load(f)
f.close()
# Kickstart metadata
f = open(constants.KICKSTART_DEFINITIONS_PATH, 'r')
self.kickstart_metadata = json.load(f)
f.close()
except IOError:
e = sys.exc_info()[1]
log(1, "Ignoring channel mappings: %s" % e)
self.content_source_mapping = {}
self.kickstart_source_mapping = {}
self.kickstart_metadata = {}
finally:
if f is not None:
f.close()
self.__init_repository_to_channels_mapping()
# Map repositories to channels
def __init_repository_to_channels_mapping(self):
self.repository_to_channels = {}
for channel in self.content_source_mapping:
for source in self.content_source_mapping[channel]:
relative_url = source['relative_url']
if relative_url in self.repository_to_channels:
self.repository_to_channels[relative_url].append(channel)
else:
self.repository_to_channels[relative_url] = [channel]
for channel in self.kickstart_metadata:
for tree in self.kickstart_metadata[channel]:
tree_label = tree['ks_tree_label']
if tree_label in self.kickstart_source_mapping:
relative_url = self.kickstart_source_mapping[tree_label][0]['relative_url']
if relative_url in self.repository_to_channels:
self.repository_to_channels[relative_url].append(channel)
else:
self.repository_to_channels[relative_url] = [channel]
def _populate_repository_tree(self, client_cert_id=None):
sql = """
select cs.label, cs.source_url, csssl.ssl_ca_cert_id,
csssl.ssl_client_cert_id, csssl.ssl_client_key_id
from rhnContentSource cs inner join
rhnContentSourceSsl csssl on cs.id = csssl.content_source_id
where cs.org_id is null
and cs.label like :prefix || '%%'
"""
# Create repository tree containing only repositories provided from single client certificate
if client_cert_id:
sql += " and csssl.ssl_client_cert_id = :client_cert_id"
query = rhnSQL.prepare(sql)
query.execute(prefix=constants.MANIFEST_REPOSITORY_DB_PREFIX, client_cert_id=client_cert_id)
rows = query.fetchall_dict() or []
cdn_repositories = {}
# Loop all rows from DB
for row in rows:
label = row['label']
if label in cdn_repositories:
cdn_repository = cdn_repositories[label]
else:
cdn_repository = CdnRepository(label, row['source_url'])
cdn_repositories[label] = cdn_repository
# Append SSL cert, key set to repository
ssl_set = CdnRepositorySsl(row['ssl_ca_cert_id'], row['ssl_client_cert_id'], row['ssl_client_key_id'])
cdn_repository.add_ssl_set(ssl_set)
# Add populated repository to tree
for cdn_repository in cdn_repositories.values():
self.repository_tree.add_repository(cdn_repository)
def get_content_sources_regular(self, channel_label, source=False):
if channel_label in self.content_source_mapping:
return [x for x in self.content_source_mapping[channel_label]
if source or x['pulp_content_category'] != "source"]
else:
return []
def get_content_sources_kickstart(self, channel_label):
repositories = []
if channel_label in self.kickstart_metadata:
for tree in self.kickstart_metadata[channel_label]:
tree_label = tree['ks_tree_label']
if tree_label in self.kickstart_source_mapping:
                    # One tree comes from one repo; one repo per tree is in the mapping.
                    # In the future there may be multiple repos for one tree and we will
                    # need to select the correct repo.
repository = self.kickstart_source_mapping[tree_label][0]
repository['ks_tree_label'] = tree_label
repositories.append(repository)
else:
log2(1, 1, "WARNING: Can't find repository for kickstart tree in mappings: %s"
% tree_label, stream=sys.stderr)
return repositories
def get_content_sources(self, channel_label, source=False):
sources = self.get_content_sources_regular(channel_label, source=source)
kickstart_sources = self.get_content_sources_kickstart(channel_label)
return sources + kickstart_sources
def check_channel_availability(self, channel_label, no_kickstarts=False):
"""Checks if all repositories for channel are available."""
if no_kickstarts:
sources = self.get_content_sources_regular(channel_label)
else:
sources = self.get_content_sources(channel_label)
# No content, no channel
if not sources:
return False
for source in sources:
if not self.check_repository_availability(source['relative_url'], channel_label=channel_label):
return False
return True
def check_repository_availability(self, relative_url, channel_label=None):
try:
crypto_keys = self.get_repository_crypto_keys(relative_url)
except CdnRepositoryNotFoundError:
log2(1, 1, "ERROR: No SSL certificates were found for repository '%s'" % relative_url, stream=sys.stderr)
return False
# Check SSL certificates
if not crypto_keys:
if channel_label:
log2(1, 1, "ERROR: No valid SSL certificates were found for repository '%s'"
" required for channel '%s'." % (relative_url, channel_label), stream=sys.stderr)
else:
log2(1, 1, "ERROR: No valid SSL certificates were found for repository '%s'." % relative_url,
stream=sys.stderr)
return False
# Try to look for repomd file
if self.local_mount_point and not os.path.isfile(os.path.join(
self.local_mount_point, relative_url[1:], "repodata/repomd.xml")):
return False
return True
def get_content_sources_import_batch(self, channel_label, backend, repos=None):
batch = []
# No custom repos specified, look into channel mappings
if not repos:
sources = self.get_content_sources(channel_label)
for source in sources:
if 'ks_tree_label' in source:
content_source = self._create_content_source_obj(source['ks_tree_label'],
source['relative_url'], backend)
else:
content_source = self._create_content_source_obj(source['pulp_repo_label_v2'],
source['relative_url'], backend)
batch.append(content_source)
# We want to sync not-mapped repositories
else:
for index, repo in enumerate(repos):
repo_label = "%s-%d" % (channel_label, index)
content_source = self._create_content_source_obj(repo_label, repo, backend)
batch.append(content_source)
return batch
def _create_content_source_obj(self, label, source_url, backend):
type_id = backend.lookupContentSourceType('yum')
content_source = ContentSource()
content_source['label'] = label
content_source['source_url'] = source_url
content_source['org_id'] = None
content_source['type_id'] = type_id
content_source['ssl-sets'] = []
repository = self.repository_tree.find_repository(source_url)
for ssl_set in repository.get_ssl_sets():
content_source_ssl = ContentSourceSsl()
content_source_ssl['ssl_ca_cert_id'] = ssl_set.get_ca_cert()
content_source_ssl['ssl_client_cert_id'] = ssl_set.get_client_cert()
content_source_ssl['ssl_client_key_id'] = ssl_set.get_client_key()
content_source['ssl-sets'].append(content_source_ssl)
return content_source
def get_repository_crypto_keys(self, url):
repo = self.repository_tree.find_repository(url)
crypto_keys = []
for ssl_set in repo.get_ssl_sets():
keys = ssl_set.get_crypto_keys(check_dates=True)
if keys:
crypto_keys.append(keys)
return crypto_keys
def assign_repositories_to_channel(self, channel_label, delete_repos=None, add_repos=None):
backend = SQLBackend()
self.unlink_all_repos(channel_label, custom_only=True)
repos = self.list_associated_repos(channel_label)
if delete_repos:
for to_delete in delete_repos:
if to_delete in repos:
repos.remove(to_delete)
log(0, "Removing repository '%s' from channel." % to_delete)
else:
log2(0, 0, "WARNING: Repository '%s' is not attached to channel." % to_delete, stream=sys.stderr)
if add_repos:
for to_add in add_repos:
if to_add not in repos:
repos.append(to_add)
log(0, "Attaching repository '%s' to channel." % to_add)
else:
log2(0, 0, "WARNING: Repository '%s' is already attached to channel." % to_add, stream=sys.stderr)
# If there are any repositories intended to be attached to channel
if repos:
content_sources_batch = self.get_content_sources_import_batch(
channel_label, backend, repos=sorted(repos))
for content_source in content_sources_batch:
content_source['channels'] = [channel_label]
importer = ContentSourcesImport(content_sources_batch, backend)
importer.run()
else:
# Make sure everything is unlinked
self.unlink_all_repos(channel_label)
return len(repos)
@staticmethod
def unlink_all_repos(channel_label, custom_only=False):
sql = """
delete from rhnChannelContentSource ccs
where ccs.channel_id = (select id from rhnChannel where label = :label)
"""
if custom_only:
sql += """
and ccs.source_id in (select id from rhnContentSource where id = ccs.source_id and org_id is not null)
"""
h = rhnSQL.prepare(sql)
h.execute(label=channel_label)
rhnSQL.commit()
@staticmethod
def list_associated_repos(channel_label):
h = rhnSQL.prepare("""
select cs.source_url
from rhnChannel c inner join
rhnChannelContentSource ccs on c.id = ccs.channel_id inner join
rhnContentSource cs on ccs.source_id = cs.id
where c.label = :label
and cs.org_id is null
""")
h.execute(label=channel_label)
paths = [r['source_url'] for r in h.fetchall_dict() or []]
return paths
@staticmethod
def list_provided_repos(crypto_key_id):
h = rhnSQL.prepare("""
select cs.source_url
from rhnContentSource cs inner join
rhnContentSourceSsl csssl on cs.id = csssl.content_source_id
where cs.label like :prefix || '%%'
and csssl.ssl_client_cert_id = :client_cert_id
""")
h.execute(prefix=constants.MANIFEST_REPOSITORY_DB_PREFIX, client_cert_id=crypto_key_id)
paths = [r['source_url'] for r in h.fetchall_dict() or []]
return paths
@staticmethod
def cleanup_orphaned_repos():
h = rhnSQL.prepare("""
delete from rhnContentSource cs
where cs.org_id is null
and cs.label not like :prefix || '%%'
and not exists (select channel_id from rhnChannelContentSource where source_id = cs.id)
""")
h.execute(prefix=constants.MANIFEST_REPOSITORY_DB_PREFIX)
rhnSQL.commit()
@staticmethod
def get_content_source_label(source):
if 'pulp_repo_label_v2' in source:
return source['pulp_repo_label_v2']
elif 'ks_tree_label' in source:
return source['ks_tree_label']
else:
raise InvalidContentSourceType()
def list_channels_containing_repository(self, relative_path):
if relative_path in self.repository_to_channels:
return self.repository_to_channels[relative_path]
else:
return []
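# Illustrative usage sketch (added for clarity, not part of the original module).
# The channel label and mount point below are hypothetical; a populated Spacewalk
# database and the CDN channel mapping files are assumed to exist.
#
#   manager = CdnRepositoryManager(local_mount_point='/mnt/cdn')
#   channel = 'rhel-x86_64-server-7'
#   if manager.check_channel_availability(channel):
#       for source in manager.get_content_sources(channel):
#           print(source['relative_url'])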
class CdnRepositoryTree(object):
"""Class representing activated CDN repositories in tree structure.
    Leaves contain CdnRepository instances.
Allows us to match direct CDN URLs without variables (coming from mapping)
to CDN URLs with variables (coming from manifest and having SSL keys/certs assigned)"""
VARIABLES = ['$releasever', '$basearch']
def __init__(self):
self.root = {}
def add_repository(self, repository):
"""Add new CdnRepository to tree."""
url = repository.get_url()
path = [x for x in url.split('/') if x]
node = self.root
for part in path[:-1]:
if part not in node:
node[part] = {}
node = node[part]
# Save repository into leaf
node[path[-1]] = repository
def _browse_node(self, node, keys):
"""Recursive function going through tree."""
# Return leaf
if not keys:
if not isinstance(node, dict):
return node
else:
raise CdnRepositoryNotFoundError()
step = keys[0]
to_check = [x for x in node.keys() if x in self.VARIABLES or x == step]
# Remove first step in path, create new list
next_keys = keys[1:]
# Check all available paths
for key in to_check:
try:
# Try to get next node and run this function recursively
next_node = node[key]
return self._browse_node(next_node, next_keys)
# From here
except KeyError:
pass
            # From the recursive call
except CdnRepositoryNotFoundError:
pass
raise CdnRepositoryNotFoundError()
def find_repository(self, url):
"""Finds matching repository in tree.
url is relative CDN url - e.g. /content/dist/rhel/server/6/6Server/x86_64/os"""
path = [x for x in url.split('/') if x]
node = self.root
try:
found = self._browse_node(node, path)
except CdnRepositoryNotFoundError:
raise CdnRepositoryNotFoundError("ERROR: Repository '%s' was not found." % url)
return found
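# Illustrative sketch (added, not part of the original module): how a concrete
# CDN path from the mapping files is matched against an activated repository
# whose URL contains yum variables. The URLs below are made-up examples.
#
#   tree = CdnRepositoryTree()
#   repo = CdnRepository('example-repo',
#                        '/content/dist/rhel/server/7/$releasever/$basearch/os')
#   tree.add_repository(repo)
#   found = tree.find_repository('/content/dist/rhel/server/7/7Server/x86_64/os')
#   assert found is repo  # $releasever/$basearch matched the concrete segments
#   # A path with no matching leaf raises CdnRepositoryNotFoundError.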
class CdnRepositoryNotFoundError(Exception):
pass
class InvalidContentSourceType(Exception):
pass
class CdnRepository(object):
"""Class representing CDN repository."""
def __init__(self, label, url):
self.label = label
self.url = url
self.ssl_sets = []
# CdnRepositorySsl instance
def add_ssl_set(self, ssl_set):
self.ssl_sets.append(ssl_set)
def get_ssl_sets(self):
return self.ssl_sets
def get_label(self):
return self.label
def get_url(self):
return self.url
class CdnRepositorySsl(object):
"""Class representing single SSL certificate, key set for single CDN repository"""
def __init__(self, ca_cert, client_cert, client_key):
self.ca_cert = int(ca_cert)
self.client_cert = int(client_cert)
self.client_key = int(client_key)
def get_ca_cert(self):
return self.ca_cert
def get_client_cert(self):
return self.client_cert
def get_client_key(self):
return self.client_key
def get_crypto_keys(self, check_dates=False):
ssl_query = rhnSQL.prepare("""
select description, key from rhnCryptoKey where id = :id
""")
keys = {}
ssl_query.execute(id=self.ca_cert)
row = ssl_query.fetchone_dict()
keys['ca_cert'] = (str(row['description']), str(row['key']))
ssl_query.execute(id=self.client_cert)
row = ssl_query.fetchone_dict()
keys['client_cert'] = (str(row['description']), str(row['key']))
ssl_query.execute(id=self.client_key)
row = ssl_query.fetchone_dict()
keys['client_key'] = (str(row['description']), str(row['key']))
# Check if SSL certificates are usable
if check_dates:
failed = 0
for key in (keys['ca_cert'], keys['client_cert']):
if not verify_certificate_dates(key[1]):
log(1, "WARNING: Problem with dates in certificate '%s'. "
"Please check validity of this certificate." % key[0])
failed += 1
if failed:
return {}
return keys
| gpl-2.0 |
ioanpocol/superdesk-core | tests/io/feed_parsers/bbc_ninjs_test.py | 3 | 2470 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013-2018 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import hashlib
from superdesk.tests import TestCase
from superdesk.io.feed_parsers.bbc_ninjs import BBCNINJSFeedParser
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, GUID_TAG
from superdesk.metadata.utils import generate_guid
class BBCNINJSTestCase(TestCase):
def setUp(self):
dirname = os.path.dirname(os.path.realpath(__file__))
fixture = os.path.normpath(os.path.join(dirname, '../fixtures', self.filename))
provider = {'name': 'Test'}
with open(fixture, 'r') as json_file:
data = json_file.read()
self.items = BBCNINJSFeedParser().parse(data, provider)
class SimpleTestCase(BBCNINJSTestCase):
filename = 'bbc-ninjs-text-test.json'
def test_trans_attributes(self):
self.assertEqual(self.items[0].get(ITEM_TYPE), CONTENT_TYPE.TEXT)
self.assertEqual(self.items[0].get('subject')[0].get('qcode'), '11016007')
guid_hash = hashlib.sha1('https://www.example.com//12345'.encode('utf8')).hexdigest()
guid = generate_guid(type=GUID_TAG, id=guid_hash)
self.assertEqual(self.items[0].get('guid'), guid)
class CompositeTestCase(BBCNINJSTestCase):
filename = 'bbc-ninjs-comp-test.json'
def test_parsed_items(self):
# The picture
self.assertEqual(self.items[1].get(ITEM_TYPE), CONTENT_TYPE.PICTURE)
self.assertEqual(self.items[1].get('headline'), 'logo-footer.png')
self.assertEqual(self.items[1].get('description_text'), 'abc')
# The text item
self.assertEqual(self.items[0].get(ITEM_TYPE), CONTENT_TYPE.TEXT)
self.assertEqual(self.items[0].get('headline'), 'abcdef')
# The associated picture in the text item
self.assertEqual(self.items[0].get('associations').get('featuremedia').get(ITEM_TYPE), CONTENT_TYPE.PICTURE)
self.assertEqual(self.items[0].get('associations').get('featuremedia').get('headline'), 'logo-footer.png')
self.assertEqual(self.items[0].get('associations').get('featuremedia').get('description_text'), 'abc')
# The composite
self.assertEqual(self.items[2].get(ITEM_TYPE), CONTENT_TYPE.COMPOSITE)
| agpl-3.0 |
simonlynen/or-tools | examples/python/debruijn_binary.py | 32 | 5936 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
de Bruijn sequences in Google CP Solver.
Implementation of de Bruijn sequences in Google CP Solver, both 'classical' and
'arbitrary'.
The 'arbitrary' version is when the length of the sequence (m here) is <
base**n.
Compare with the web-based programs:
http://www.hakank.org/comb/debruijn.cgi
http://www.hakank.org/comb/debruijn_arb.cgi
Compare with the following models:
* Tailor/Essence': http://hakank.org/tailor/debruijn.eprime
* MiniZinc: http://hakank.org/minizinc/debruijn_binary.mzn
* SICStus: http://hakank.org/sicstus/debruijn.pl
* Zinc: http://hakank.org/minizinc/debruijn_binary.zinc
* Choco: http://hakank.org/choco/DeBruijn.java
* Comet: http://hakank.org/comet/debruijn.co
* ECLiPSe: http://hakank.org/eclipse/debruijn.ecl
* Gecode: http://hakank.org/gecode/debruijn.cpp
* Gecode/R: http://hakank.org/gecode_r/debruijn_binary.rb
* JaCoP: http://hakank.org/JaCoP/DeBruijn.java
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
import string
from ortools.constraint_solver import pywrapcp
# converts a number (s) <-> an array of numbers (t) in the specific base.
def toNum(solver, t, s, base):
tlen = len(t)
solver.Add(
s == solver.Sum([(base ** (tlen - i - 1)) * t[i] for i in range(tlen)]))
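# Example (added): for base=10 and t of length 3, toNum constrains
# s == 100*t[0] + 10*t[1] + 1*t[2], so t = [1, 2, 3] forces s = 123.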
def main(base=2, n=3, m=8):
# Create the solver.
solver = pywrapcp.Solver("de Bruijn sequences")
#
# data
#
# base = 2 # the base to use, i.e. the alphabet 0..n-1
# n = 3 # number of bits to use (n = 4 -> 0..base^n-1 = 0..2^4 -1, i.e. 0..15)
# m = base**n # the length of the sequence. For "arbitrary" de Bruijn
# sequences
# base = 4
# n = 4
# m = base**n
# harder problem
#base = 13
#n = 4
#m = 52
# for n = 4 with different value of base
# base = 2 0.030 seconds 16 failures
# base = 3 0.041 108
# base = 4 0.070 384
# base = 5 0.231 1000
# base = 6 0.736 2160
# base = 7 2.2 seconds 4116
# base = 8 6 seconds 7168
# base = 9 16 seconds 11664
# base = 10 42 seconds 18000
# base = 6
# n = 4
# m = base**n
# if True then ensure that the number of occurrences of 0..base-1 is
# the same (and if m mod base = 0)
check_same_gcc = True
print "base: %i n: %i m: %i" % (base, n, m)
if check_same_gcc:
print "Checks gcc"
# declare variables
x = [solver.IntVar(0, (base ** n) - 1, "x%i" % i) for i in range(m)]
binary = {}
for i in range(m):
for j in range(n):
binary[(i, j)] = solver.IntVar(0, base - 1, "x_%i_%i" % (i, j))
bin_code = [solver.IntVar(0, base - 1, "bin_code%i" % i) for i in range(m)]
#
# constraints
#
#solver.Add(solver.AllDifferent([x[i] for i in range(m)]))
solver.Add(solver.AllDifferent(x))
# converts x <-> binary
for i in range(m):
t = [solver.IntVar(0, base - 1, "t_%i" % j) for j in range(n)]
toNum(solver, t, x[i], base)
for j in range(n):
solver.Add(binary[(i, j)] == t[j])
# the de Bruijn condition
# the first elements in binary[i] is the same as the last
# elements in binary[i-i]
for i in range(1, m - 1):
for j in range(1, n - 1):
solver.Add(binary[(i - 1, j)] == binary[(i, j - 1)])
# ... and around the corner
for j in range(1, n):
solver.Add(binary[(m - 1, j)] == binary[(0, j - 1)])
# converts binary -> bin_code
for i in range(m):
solver.Add(bin_code[i] == binary[(i, 0)])
# extra: ensure that all the numbers in the de Bruijn sequence
# (bin_code) has the same occurrences (if check_same_gcc is True
# and mathematically possible)
gcc = [solver.IntVar(0, m, "gcc%i" % i) for i in range(base)]
solver.Add(solver.Distribute(bin_code, range(base), gcc))
if check_same_gcc and m % base == 0:
for i in range(1, base):
solver.Add(gcc[i] == gcc[i - 1])
#
# solution and search
#
solution = solver.Assignment()
solution.Add([x[i] for i in range(m)])
solution.Add([bin_code[i] for i in range(m)])
# solution.Add([binary[(i,j)] for i in range(m) for j in range(n)])
solution.Add([gcc[i] for i in range(base)])
db = solver.Phase([x[i] for i in range(m)] + [bin_code[i] for i in range(m)],
solver.CHOOSE_MIN_SIZE_LOWEST_MAX,
solver.ASSIGN_MIN_VALUE)
num_solutions = 0
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print "\nSolution %i" % num_solutions
print "x:", [x[i].Value() for i in range(m)]
print "gcc:", [gcc[i].Value() for i in range(base)]
print "de Bruijn sequence:", [bin_code[i].Value() for i in range(m)]
# for i in range(m):
# for j in range(n):
# print binary[(i,j)].Value(),
# print
# print
solver.EndSearch()
if num_solutions == 0:
print "No solution found"
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
base = 2
n = 3
m = base ** n
if __name__ == "__main__":
if len(sys.argv) > 1:
base = string.atoi(sys.argv[1])
if len(sys.argv) > 2:
n = string.atoi(sys.argv[2])
if len(sys.argv) > 3:
m = string.atoi(sys.argv[3])
main(base, n, m)
| apache-2.0 |
ncdesouza/bookworm | env/lib/python2.7/site-packages/sqlalchemy/sql/naming.py | 33 | 4588 | # sqlalchemy/naming.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Establish constraint and index naming conventions.
"""
from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \
UniqueConstraint, CheckConstraint, Index, Table, Column
from .. import event, events
from .. import exc
from .elements import _truncated_label, _defer_name, _defer_none_name, conv
import re
class ConventionDict(object):
def __init__(self, const, table, convention):
self.const = const
self._is_fk = isinstance(const, ForeignKeyConstraint)
self.table = table
self.convention = convention
self._const_name = const.name
def _key_table_name(self):
return self.table.name
def _column_X(self, idx):
if self._is_fk:
fk = self.const.elements[idx]
return fk.parent
else:
return list(self.const.columns)[idx]
def _key_constraint_name(self):
if isinstance(self._const_name, (type(None), _defer_none_name)):
raise exc.InvalidRequestError(
"Naming convention including "
"%(constraint_name)s token requires that "
"constraint is explicitly named."
)
if not isinstance(self._const_name, conv):
self.const.name = None
return self._const_name
def _key_column_X_name(self, idx):
return self._column_X(idx).name
def _key_column_X_label(self, idx):
return self._column_X(idx)._label
def _key_referred_table_name(self):
fk = self.const.elements[0]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return reftable
def _key_referred_column_X_name(self, idx):
fk = self.const.elements[idx]
refs = fk.target_fullname.split(".")
if len(refs) == 3:
refschema, reftable, refcol = refs
else:
reftable, refcol = refs
return refcol
def __getitem__(self, key):
if key in self.convention:
return self.convention[key](self.const, self.table)
elif hasattr(self, '_key_%s' % key):
return getattr(self, '_key_%s' % key)()
else:
col_template = re.match(r".*_?column_(\d+)_.+", key)
if col_template:
idx = col_template.group(1)
attr = "_key_" + key.replace(idx, "X")
idx = int(idx)
if hasattr(self, attr):
return getattr(self, attr)(idx)
raise KeyError(key)
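# Illustrative sketch (added, not part of the original module): with a naming
# convention such as
#
#     metadata = MetaData(naming_convention={
#         "uq": "uq_%(table_name)s_%(column_0_name)s",
#     })
#
# an unnamed UniqueConstraint on user.name receives the generated name
# "uq_user_name"; ConventionDict resolves the %(table_name)s and
# %(column_0_name)s tokens via the _key_* accessors above.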
_prefix_dict = {
Index: "ix",
PrimaryKeyConstraint: "pk",
CheckConstraint: "ck",
UniqueConstraint: "uq",
ForeignKeyConstraint: "fk"
}
def _get_convention(dict_, key):
for super_ in key.__mro__:
if super_ in _prefix_dict and _prefix_dict[super_] in dict_:
return dict_[_prefix_dict[super_]]
elif super_ in dict_:
return dict_[super_]
else:
return None
def _constraint_name_for_table(const, table):
metadata = table.metadata
convention = _get_convention(metadata.naming_convention, type(const))
if isinstance(const.name, conv):
return const.name
elif convention is not None and (
const.name is None or not isinstance(const.name, conv) and
"constraint_name" in convention
):
return conv(
convention % ConventionDict(const, table,
metadata.naming_convention)
)
elif isinstance(convention, _defer_none_name):
return None
@event.listens_for(Constraint, "after_parent_attach")
@event.listens_for(Index, "after_parent_attach")
def _constraint_name(const, table):
if isinstance(table, Column):
# for column-attached constraint, set another event
# to link the column attached to the table as this constraint
# associated with the table.
event.listen(table, "after_parent_attach",
lambda col, table: _constraint_name(const, table)
)
elif isinstance(table, Table):
if isinstance(const.name, (conv, _defer_name)):
return
newname = _constraint_name_for_table(const, table)
if newname is not None:
const.name = newname
| gpl-3.0 |
jakew02/android_kernel_lge_bullhead | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
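# Example invocation (added; illustrative, the exact record/report helpers may
# vary between kernel trees):
#   perf record -e raw_syscalls:sys_exit -a -- sleep 5
#   perf script -s ./failed-syscalls-by-pid.py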
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
Mazecreator/tensorflow | tensorflow/contrib/learn/python/learn/datasets/base_test.py | 136 | 3072 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import test
mock = test.mock
_TIMEOUT = IOError(110, "timeout")
class BaseTest(test.TestCase):
"""Test load csv functions."""
def testUrlretrieveRetriesOnIOError(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, _TIMEOUT, None
]
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesAfterRetriesAreExhausted(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
_TIMEOUT,
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert full backoff was tried
actual_list = [arg[0][0] for arg in mock_time.sleep.call_args_list]
expected_list = [1, 2, 4, 8, 16]
for actual, expected in zip(actual_list, expected_list):
self.assertLessEqual(abs(actual - expected), 0.25 * expected)
self.assertEquals(len(actual_list), len(expected_list))
def testUrlretrieveRaisesOnNonRetriableErrorWithoutRetry(self):
with mock.patch.object(base, "time") as mock_time:
with mock.patch.object(base, "urllib") as mock_urllib:
mock_urllib.request.urlretrieve.side_effect = [
IOError(2, "No such file or directory"),
]
with self.assertRaises(IOError):
base.urlretrieve_with_retry("http://dummy.com", "/tmp/dummy")
# Assert no retries
self.assertFalse(mock_time.called)
if __name__ == "__main__":
test.main()
| apache-2.0 |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Amazon/CloudDrive/Files/ListFiles.py | 5 | 6477 | # -*- coding: utf-8 -*-
###############################################################################
#
# ListFiles
# Returns a list of files.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListFiles(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListFiles Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListFiles, self).__init__(temboo_session, '/Library/Amazon/CloudDrive/Files/ListFiles')
def new_input_set(self):
return ListFilesInputSet()
def _make_result_set(self, result, path):
return ListFilesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListFilesChoreographyExecution(session, exec_id, path)
class ListFilesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListFiles
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(ListFilesInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Amazon. Required unless providing a valid AccessToken.)
"""
super(ListFilesInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Amazon. Required unless providing a valid AccessToken.)
"""
super(ListFilesInputSet, self)._set_input('ClientSecret', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma-separated list of additional fields to include in the response.)
"""
super(ListFilesInputSet, self)._set_input('Fields', value)
def set_Filters(self, value):
"""
Set the value of the Filters input for this Choreo. ((optional, string) A filter used to narrow the result set (e.g., name:MyFile.txt). The default value is "kind:FILE". To a make a request using no filters, you can pass "none".)
"""
super(ListFilesInputSet, self)._set_input('Filters', value)
def set_HandleRequestThrottling(self, value):
"""
Set the value of the HandleRequestThrottling input for this Choreo. ((optional, boolean) Whether or not to perform a retry sequence if a throttling error occurs. Set to true to enable this feature. The request will be retried up-to five times when enabled.)
"""
super(ListFilesInputSet, self)._set_input('HandleRequestThrottling', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, string) The maximum number of records to be returned.)
"""
super(ListFilesInputSet, self)._set_input('Limit', value)
def set_MetaDataURL(self, value):
"""
Set the value of the MetaDataURL input for this Choreo. ((optional, string) The appropriate metadataUrl for your account. When not provided, the Choreo will lookup the URL using the Account.GetEndpoint Choreo.)
"""
super(ListFilesInputSet, self)._set_input('MetaDataURL', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(ListFilesInputSet, self)._set_input('RefreshToken', value)
def set_Sort(self, value):
"""
Set the value of the Sort input for this Choreo. ((optional, json) A JSON array containing sort properties (e.g., ["name ASC","contentProperties.size" DESC]).)
"""
super(ListFilesInputSet, self)._set_input('Sort', value)
def set_StartToken(self, value):
"""
Set the value of the StartToken input for this Choreo. ((optional, string) The nextToken returned from a previous request. Used to paginate through results.)
"""
super(ListFilesInputSet, self)._set_input('StartToken', value)
class ListFilesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListFiles Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Amazon.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class ListFilesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListFilesResultSet(response, path)
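# Illustrative usage sketch (added, not part of the generated Choreo code). The
# account and key names are placeholders; execute_with_results() is assumed to
# be available on the Temboo Choreography base class.
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ListFiles(session)
#   inputs = choreo.new_input_set()
#   inputs.set_ClientID('...')
#   inputs.set_ClientSecret('...')
#   inputs.set_RefreshToken('...')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())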
| apache-2.0 |
miipl-naveen/optibizz | addons/account/res_config.py | 200 | 25453 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from dateutil.relativedelta import relativedelta
import openerp
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from openerp.osv import fields, osv
class account_config_settings(osv.osv_memory):
_name = 'account.config.settings'
_inherit = 'res.config.settings'
_columns = {
'company_id': fields.many2one('res.company', 'Company', required=True),
'has_default_company': fields.boolean('Has default company', readonly=True),
'expects_chart_of_accounts': fields.related('company_id', 'expects_chart_of_accounts', type='boolean',
string='This company has its own chart of accounts',
help="""Check this box if this company is a legal entity."""),
'currency_id': fields.related('company_id', 'currency_id', type='many2one', relation='res.currency', required=True,
string='Default company currency', help="Main currency of the company."),
'paypal_account': fields.related('company_id', 'paypal_account', type='char', size=128,
string='Paypal account', help="Paypal account (email) for receiving online payments (credit card, etc.) If you set a paypal account, the customer will be able to pay your invoices or quotations with a button \"Pay with Paypal\" in automated emails or through the Odoo portal."),
'company_footer': fields.related('company_id', 'rml_footer', type='text', readonly=True,
string='Bank accounts footer preview', help="Bank accounts as printed in the footer of each printed document"),
'has_chart_of_accounts': fields.boolean('Company has a chart of accounts'),
'chart_template_id': fields.many2one('account.chart.template', 'Template', domain="[('visible','=', True)]"),
'code_digits': fields.integer('# of Digits', help="No. of digits to use for account code"),
'tax_calculation_rounding_method': fields.related('company_id',
'tax_calculation_rounding_method', type='selection', selection=[
('round_per_line', 'Round per line'),
('round_globally', 'Round globally'),
], string='Tax calculation rounding method',
help="If you select 'Round per line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
'sale_tax': fields.many2one("account.tax.template", "Default sale tax"),
'purchase_tax': fields.many2one("account.tax.template", "Default purchase tax"),
'sale_tax_rate': fields.float('Sales tax (%)'),
'purchase_tax_rate': fields.float('Purchase tax (%)'),
'complete_tax_set': fields.boolean('Complete set of taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete'),
'has_fiscal_year': fields.boolean('Company has a fiscal year'),
'date_start': fields.date('Start date', required=True),
'date_stop': fields.date('End date', required=True),
'period': fields.selection([('month', 'Monthly'), ('3months','3 Monthly')], 'Periods', required=True),
'sale_journal_id': fields.many2one('account.journal', 'Sale journal'),
'sale_sequence_prefix': fields.related('sale_journal_id', 'sequence_id', 'prefix', type='char', string='Invoice sequence'),
'sale_sequence_next': fields.related('sale_journal_id', 'sequence_id', 'number_next', type='integer', string='Next invoice number'),
'sale_refund_journal_id': fields.many2one('account.journal', 'Sale refund journal'),
'sale_refund_sequence_prefix': fields.related('sale_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Credit note sequence'),
'sale_refund_sequence_next': fields.related('sale_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next credit note number'),
'purchase_journal_id': fields.many2one('account.journal', 'Purchase journal'),
'purchase_sequence_prefix': fields.related('purchase_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier invoice sequence'),
'purchase_sequence_next': fields.related('purchase_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier invoice number'),
'purchase_refund_journal_id': fields.many2one('account.journal', 'Purchase refund journal'),
'purchase_refund_sequence_prefix': fields.related('purchase_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier credit note sequence'),
'purchase_refund_sequence_next': fields.related('purchase_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier credit note number'),
'module_account_check_writing': fields.boolean('Pay your suppliers by check',
help='This allows you to check writing and printing.\n'
'-This installs the module account_check_writing.'),
'module_account_accountant': fields.boolean('Full accounting features: journals, legal statements, chart of accounts, etc.',
help="""If you do not check this box, you will be able to do invoicing & payments, but not accounting (Journal Items, Chart of Accounts, ...)"""),
'module_account_asset': fields.boolean('Assets management',
help='This allows you to manage the assets owned by a company or a person.\n'
'It keeps track of the depreciation occurred on those assets, and creates account move for those depreciation lines.\n'
'-This installs the module account_asset. If you do not check this box, you will be able to do invoicing & payments, '
'but not accounting (Journal Items, Chart of Accounts, ...)'),
'module_account_budget': fields.boolean('Budget management',
help='This allows accountants to manage analytic and crossovered budgets. '
'Once the master budgets and the budgets are defined, '
'the project managers can set the planned amount on each analytic account.\n'
'-This installs the module account_budget.'),
'module_account_payment': fields.boolean('Manage payment orders',
help='This allows you to create and manage your payment orders, with purposes to \n'
'* serve as base for an easy plug-in of various automated payment mechanisms, and \n'
'* provide a more efficient way to manage invoice payments.\n'
'-This installs the module account_payment.' ),
'module_account_voucher': fields.boolean('Manage customer payments',
help='This includes all the basic requirements of voucher entries for bank, cash, sales, purchase, expense, contra, etc.\n'
'-This installs the module account_voucher.'),
'module_account_followup': fields.boolean('Manage customer payment follow-ups',
help='This allows to automate letters for unpaid invoices, with multi-level recalls.\n'
'-This installs the module account_followup.'),
'module_product_email_template': fields.boolean('Send products tools and information at the invoice confirmation',
help='With this module, link your products to a template to send complete information and tools to your customer.\n'
'For instance when invoicing a training, the training agenda and materials will automatically be send to your customers.'),
'group_proforma_invoices': fields.boolean('Allow pro-forma invoices',
implied_group='account.group_proforma_invoices',
help="Allows you to put invoices in pro-forma state."),
'default_sale_tax': fields.many2one('account.tax', 'Default sale tax',
help="This sale tax will be assigned by default on new products."),
'default_purchase_tax': fields.many2one('account.tax', 'Default purchase tax',
help="This purchase tax will be assigned by default on new products."),
'decimal_precision': fields.integer('Decimal precision on journal entries',
help="""As an example, a decimal precision of 2 will allow journal entries like: 9.99 EUR, whereas a decimal precision of 4 will allow journal entries like: 0.0231 EUR."""),
'group_multi_currency': fields.boolean('Allow multi currencies',
implied_group='base.group_multi_currency',
help="Allows you multi currency environment"),
'group_analytic_accounting': fields.boolean('Analytic accounting',
implied_group='analytic.group_analytic_accounting',
help="Allows you to use the analytic accounting."),
'group_check_supplier_invoice_total': fields.boolean('Check the total of supplier invoices',
implied_group="account.group_supplier_inv_check_total"),
'income_currency_exchange_account_id': fields.related(
'company_id', 'income_currency_exchange_account_id',
type='many2one',
relation='account.account',
string="Gain Exchange Rate Account",
domain="[('type', '=', 'other'), ('company_id', '=', company_id)]]"),
'expense_currency_exchange_account_id': fields.related(
'company_id', 'expense_currency_exchange_account_id',
type="many2one",
relation='account.account',
string="Loss Exchange Rate Account",
domain="[('type', '=', 'other'), ('company_id', '=', company_id)]]"),
}
def _check_account_gain(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.income_currency_exchange_account_id.company_id and obj.company_id != obj.income_currency_exchange_account_id.company_id:
return False
return True
def _check_account_loss(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.expense_currency_exchange_account_id.company_id and obj.company_id != obj.expense_currency_exchange_account_id.company_id:
return False
return True
_constraints = [
(_check_account_gain, 'The company of the gain exchange rate account must be the same than the company selected.', ['income_currency_exchange_account_id']),
(_check_account_loss, 'The company of the loss exchange rate account must be the same than the company selected.', ['expense_currency_exchange_account_id']),
]
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.id
def _default_has_default_company(self, cr, uid, context=None):
count = self.pool.get('res.company').search_count(cr, uid, [], context=context)
return bool(count == 1)
def _get_default_fiscalyear_data(self, cr, uid, company_id, context=None):
"""Compute default period, starting and ending date for fiscalyear
- if in a fiscal year, use its period, starting and ending date
- if past fiscal year, use its period, and new dates [ending date of the latest +1 day ; ending date of the latest +1 year]
- if no fiscal year, use monthly, 1st jan, 31th dec of this year
:return: (date_start, date_stop, period) at format DEFAULT_SERVER_DATETIME_FORMAT
"""
fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid,
[('date_start', '<=', time.strftime(DF)), ('date_stop', '>=', time.strftime(DF)),
('company_id', '=', company_id)])
if fiscalyear_ids:
# is in a current fiscal year, use this one
fiscalyear = self.pool.get('account.fiscalyear').browse(cr, uid, fiscalyear_ids[0], context=context)
if len(fiscalyear.period_ids) == 5: # 4 periods of 3 months + opening period
period = '3months'
else:
period = 'month'
return (fiscalyear.date_start, fiscalyear.date_stop, period)
else:
past_fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid,
[('date_stop', '<=', time.strftime(DF)), ('company_id', '=', company_id)])
if past_fiscalyear_ids:
# use the latest fiscal, sorted by (start_date, id)
latest_year = self.pool.get('account.fiscalyear').browse(cr, uid, past_fiscalyear_ids[-1], context=context)
latest_stop = datetime.datetime.strptime(latest_year.date_stop, DF)
if len(latest_year.period_ids) == 5:
period = '3months'
else:
period = 'month'
return ((latest_stop+datetime.timedelta(days=1)).strftime(DF), latest_stop.replace(year=latest_stop.year+1).strftime(DF), period)
else:
return (time.strftime('%Y-01-01'), time.strftime('%Y-12-31'), 'month')
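    # Worked example (added for clarity): if the latest closed fiscal year ended
    # on 2013-12-31 with monthly periods, the branch above proposes
    # ('2014-01-01', '2014-12-31', 'month') as the new defaults.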
_defaults = {
'company_id': _default_company,
'has_default_company': _default_has_default_company,
}
def create(self, cr, uid, values, context=None):
id = super(account_config_settings, self).create(cr, uid, values, context)
# Hack: to avoid some nasty bug, related fields are not written upon record creation.
# Hence we write on those fields here.
vals = {}
for fname, field in self._columns.iteritems():
if isinstance(field, fields.related) and fname in values:
vals[fname] = values[fname]
self.write(cr, uid, [id], vals, context)
return id
def onchange_company_id(self, cr, uid, ids, company_id, context=None):
# update related fields
values = {}
values['currency_id'] = False
if company_id:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
has_chart_of_accounts = company_id not in self.pool.get('account.installer').get_unconfigured_cmp(cr, uid)
fiscalyear_count = self.pool.get('account.fiscalyear').search_count(cr, uid,
[('date_start', '<=', time.strftime('%Y-%m-%d')), ('date_stop', '>=', time.strftime('%Y-%m-%d')),
('company_id', '=', company_id)])
date_start, date_stop, period = self._get_default_fiscalyear_data(cr, uid, company_id, context=context)
values = {
'expects_chart_of_accounts': company.expects_chart_of_accounts,
'currency_id': company.currency_id.id,
'paypal_account': company.paypal_account,
'company_footer': company.rml_footer,
'has_chart_of_accounts': has_chart_of_accounts,
'has_fiscal_year': bool(fiscalyear_count),
'chart_template_id': False,
'tax_calculation_rounding_method': company.tax_calculation_rounding_method,
'date_start': date_start,
'date_stop': date_stop,
'period': period,
}
# update journals and sequences
for journal_type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'):
for suffix in ('_journal_id', '_sequence_prefix', '_sequence_next'):
values[journal_type + suffix] = False
journal_obj = self.pool.get('account.journal')
journal_ids = journal_obj.search(cr, uid, [('company_id', '=', company_id)])
for journal in journal_obj.browse(cr, uid, journal_ids):
if journal.type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'):
values.update({
journal.type + '_journal_id': journal.id,
journal.type + '_sequence_prefix': journal.sequence_id.prefix,
journal.type + '_sequence_next': journal.sequence_id.number_next,
})
# update taxes
ir_values = self.pool.get('ir.values')
taxes_id = ir_values.get_default(cr, uid, 'product.template', 'taxes_id', company_id=company_id)
supplier_taxes_id = ir_values.get_default(cr, uid, 'product.template', 'supplier_taxes_id', company_id=company_id)
values.update({
'default_sale_tax': isinstance(taxes_id, list) and taxes_id[0] or taxes_id,
'default_purchase_tax': isinstance(supplier_taxes_id, list) and supplier_taxes_id[0] or supplier_taxes_id,
})
# update gain/loss exchange rate accounts
values.update({
'income_currency_exchange_account_id': company.income_currency_exchange_account_id and company.income_currency_exchange_account_id.id or False,
'expense_currency_exchange_account_id': company.expense_currency_exchange_account_id and company.expense_currency_exchange_account_id.id or False
})
return {'value': values}
def onchange_chart_template_id(self, cr, uid, ids, chart_template_id, context=None):
tax_templ_obj = self.pool.get('account.tax.template')
res = {'value': {
'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False,
'sale_tax_rate': 15, 'purchase_tax_rate': 15,
}}
if chart_template_id:
# update complete_tax_set, sale_tax and purchase_tax
chart_template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
res['value'].update({'complete_tax_set': chart_template.complete_tax_set})
if chart_template.complete_tax_set:
                # default tax is given by the lowest sequence. For the same sequence we take the latest created, as is the case for taxes created while installing the generic chart of accounts
sale_tax_ids = tax_templ_obj.search(cr, uid,
[("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('sale','all'))],
order="sequence, id desc")
purchase_tax_ids = tax_templ_obj.search(cr, uid,
[("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('purchase','all'))],
order="sequence, id desc")
res['value']['sale_tax'] = sale_tax_ids and sale_tax_ids[0] or False
res['value']['purchase_tax'] = purchase_tax_ids and purchase_tax_ids[0] or False
if chart_template.code_digits:
res['value']['code_digits'] = chart_template.code_digits
return res
def onchange_tax_rate(self, cr, uid, ids, rate, context=None):
return {'value': {'purchase_tax_rate': rate or False}}
def onchange_multi_currency(self, cr, uid, ids, group_multi_currency, context=None):
res = {}
if not group_multi_currency:
res['value'] = {'income_currency_exchange_account_id': False, 'expense_currency_exchange_account_id': False}
return res
def onchange_start_date(self, cr, uid, id, start_date):
if start_date:
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1)
return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}}
return {}
def open_company_form(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
return {
'type': 'ir.actions.act_window',
'name': 'Configure your Company',
'res_model': 'res.company',
'res_id': config.company_id.id,
'view_mode': 'form',
}
def set_default_taxes(self, cr, uid, ids, context=None):
""" set default sale and purchase taxes for products """
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
config = self.browse(cr, uid, ids[0], context)
ir_values.set_default(cr, SUPERUSER_ID, 'product.template', 'taxes_id',
config.default_sale_tax and [config.default_sale_tax.id] or False, company_id=config.company_id.id)
ir_values.set_default(cr, SUPERUSER_ID, 'product.template', 'supplier_taxes_id',
config.default_purchase_tax and [config.default_purchase_tax.id] or False, company_id=config.company_id.id)
def set_chart_of_accounts(self, cr, uid, ids, context=None):
""" install a chart of accounts for the given company (if required) """
config = self.browse(cr, uid, ids[0], context)
if config.chart_template_id:
assert config.expects_chart_of_accounts and not config.has_chart_of_accounts
wizard = self.pool.get('wizard.multi.charts.accounts')
wizard_id = wizard.create(cr, uid, {
'company_id': config.company_id.id,
'chart_template_id': config.chart_template_id.id,
'code_digits': config.code_digits or 6,
'sale_tax': config.sale_tax.id,
'purchase_tax': config.purchase_tax.id,
'sale_tax_rate': config.sale_tax_rate,
'purchase_tax_rate': config.purchase_tax_rate,
'complete_tax_set': config.complete_tax_set,
'currency_id': config.currency_id.id,
}, context)
wizard.execute(cr, uid, [wizard_id], context)
def set_fiscalyear(self, cr, uid, ids, context=None):
""" create a fiscal year for the given company (if necessary) """
config = self.browse(cr, uid, ids[0], context)
if config.has_chart_of_accounts or config.chart_template_id:
fiscalyear = self.pool.get('account.fiscalyear')
fiscalyear_count = fiscalyear.search_count(cr, uid,
[('date_start', '<=', config.date_start), ('date_stop', '>=', config.date_stop),
('company_id', '=', config.company_id.id)],
context=context)
if not fiscalyear_count:
name = code = config.date_start[:4]
if int(name) != int(config.date_stop[:4]):
name = config.date_start[:4] +'-'+ config.date_stop[:4]
code = config.date_start[2:4] +'-'+ config.date_stop[2:4]
vals = {
'name': name,
'code': code,
'date_start': config.date_start,
'date_stop': config.date_stop,
'company_id': config.company_id.id,
}
fiscalyear_id = fiscalyear.create(cr, uid, vals, context=context)
if config.period == 'month':
fiscalyear.create_period(cr, uid, [fiscalyear_id])
elif config.period == '3months':
fiscalyear.create_period3(cr, uid, [fiscalyear_id])
def get_default_dp(self, cr, uid, fields, context=None):
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account')
return {'decimal_precision': dp.digits}
def set_default_dp(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account')
dp.write({'digits': config.decimal_precision})
def onchange_analytic_accounting(self, cr, uid, ids, analytic_accounting, context=None):
if analytic_accounting:
return {'value': {
'module_account_accountant': True,
}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
miipl-naveen/optibizz | addons/gamification/__openerp__.py | 299 | 2464 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Gamification',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'Human Resources',
'website' : 'https://www.odoo.com/page/gamification',
'depends': ['mail', 'email_template', 'web_kanban_gauge'],
'description': """
Gamification process
====================
The Gamification module provides ways to evaluate and motivate the users of OpenERP.
The users can be evaluated using goals and numerical objectives to reach.
**Goals** are assigned through **challenges** to evaluate and compare members of a team with each other and over time.
For non-numerical achievements, **badges** can be granted to users. From a simple "thank you" to an exceptional achievement, a badge is an easy way to express gratitude to a user for their good work.
Both goals and badges are flexible and can be adapted to a large range of modules and actions. When installed, this module creates simple goals to help new users discover OpenERP and configure their user profile.
""",
'data': [
'wizard/update_goal.xml',
'wizard/grant_badge.xml',
'views/badge.xml',
'views/challenge.xml',
'views/goal.xml',
'data/cron.xml',
'security/gamification_security.xml',
'security/ir.model.access.csv',
'data/goal_base.xml',
'data/badge.xml',
'views/gamification.xml',
],
'application': True,
'auto_install': False,
'qweb': ['static/src/xml/gamification.xml'],
}
| agpl-3.0 |
divya-csekar/flask-microblog-server | flask/Lib/site-packages/sqlalchemy/testing/entities.py | 33 | 2992 | # testing/entities.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sqlalchemy as sa
from sqlalchemy import exc as sa_exc
_repr_stack = set()
class BasicEntity(object):
def __init__(self, **kw):
for key, value in kw.items():
setattr(self, key, value)
def __repr__(self):
if id(self) in _repr_stack:
return object.__repr__(self)
_repr_stack.add(id(self))
try:
return "%s(%s)" % (
(self.__class__.__name__),
', '.join(["%s=%r" % (key, getattr(self, key))
for key in sorted(self.__dict__.keys())
if not key.startswith('_')]))
finally:
_repr_stack.remove(id(self))
_recursion_stack = set()
class ComparableEntity(BasicEntity):
def __hash__(self):
return hash(self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
"""'Deep, sparse compare.
Deeply compare two entities, following the non-None attributes of the
non-persisted object, if possible.
"""
if other is self:
return True
elif not self.__class__ == other.__class__:
return False
if id(self) in _recursion_stack:
return True
_recursion_stack.add(id(self))
try:
# pick the entity that's not SA persisted as the source
try:
self_key = sa.orm.attributes.instance_state(self).key
except sa.orm.exc.NO_STATE:
self_key = None
if other is None:
a = self
b = other
elif self_key is not None:
a = other
b = self
else:
a = self
b = other
for attr in list(a.__dict__):
if attr.startswith('_'):
continue
value = getattr(a, attr)
try:
# handle lazy loader errors
battr = getattr(b, attr)
except (AttributeError, sa_exc.UnboundExecutionError):
return False
if hasattr(value, '__iter__'):
if hasattr(value, '__getitem__') and not hasattr(
value, 'keys'):
if list(value) != list(battr):
return False
else:
if set(value) != set(battr):
return False
else:
if value is not None and value != battr:
return False
return True
finally:
_recursion_stack.remove(id(self))
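# Illustrative usage sketch (not part of the original module); the User
# class and attribute values below are hypothetical:
#
#   class User(ComparableEntity):
#       pass
#
#   a = User(id=1, name="ed")
#   b = User(id=1, name="ed")
#   assert a == b                      # deep, sparse attribute comparison
#   assert a != User(id=2, name="ed")  # differing public attribute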
| bsd-3-clause |
double12gzh/nova | nova/cells/filters/image_properties.py | 61 | 2516 | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image properties filter.
An image metadata property named 'hypervisor_version_requires' may carry a
version specification to ensure the build goes to a cell whose hypervisors
are of the required version.
If either the version requirement on the image or the hypervisor capability
of the cell is not present, this filter returns without filtering out the
cells.
"""
from distutils import versionpredicate
from oslo_log import log as logging
from nova.cells import filters
LOG = logging.getLogger(__name__)
class ImagePropertiesFilter(filters.BaseCellFilter):
"""Image properties filter. Works by specifying the hypervisor required in
the image metadata and the supported hypervisor version in cell
capabilities.
"""
def filter_all(self, cells, filter_properties):
"""Override filter_all() which operates on the full list
of cells...
"""
request_spec = filter_properties.get('request_spec', {})
image_properties = request_spec.get('image', {}).get('properties', {})
hypervisor_version_requires = image_properties.get(
'hypervisor_version_requires')
if hypervisor_version_requires is None:
return cells
filtered_cells = []
for cell in cells:
version = cell.capabilities.get('prominent_hypervisor_version')
if version:
l = list(version)
version = str(l[0])
if not version or self._matches_version(version,
hypervisor_version_requires):
filtered_cells.append(cell)
return filtered_cells
def _matches_version(self, version, version_requires):
predicate = versionpredicate.VersionPredicate(
'prop (%s)' % version_requires)
return predicate.satisfied_by(version)
| apache-2.0 |
rversteegen/commandergenius | project/jni/python/src/Lib/idlelib/MultiCall.py | 49 | 17282 | """
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the binded functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-binded event will be called before a previously-binded event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import sys
import string
import re
import Tkinter
import macosxSupport
# the event type constants, which define the meaning of mc_type
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
# the modifier state constants, which define the meaning of mc_state
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# define the list of modifiers, to be used in complex event types.
if macosxSupport.runningAsOSXApp():
_modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
_modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
_modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
_modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
# a dictionary to map a modifier name into its number
_modifier_names = dict([(name, number)
for number in range(len(_modifiers))
for name in _modifiers[number]])
# A binder is a class which binds functions to one type of event. It has two
# methods: bind and unbind, which get a function and a parsed sequence, as
# returned by _parse_sequence(). There are two types of binders:
# _SimpleBinder handles event types with no modifiers and no detail.
# No Python functions are called when no events are binded.
# _ComplexBinder handles event types with modifiers and a detail.
# A Python function is called each time an event is generated.
class _SimpleBinder:
def __init__(self, type, widget, widgetinst):
self.type = type
self.sequence = '<'+_types[type][0]+'>'
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = []
self.handlerid = None
def bind(self, triplet, func):
if not self.handlerid:
def handler(event, l = self.bindedfuncs, mc_type = self.type):
event.mc_type = mc_type
wascalled = {}
for i in range(len(l)-1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = func(event)
if r:
return r
self.handlerid = self.widget.bind(self.widgetinst,
self.sequence, handler)
self.bindedfuncs.append(func)
def unbind(self, triplet, func):
self.bindedfuncs.remove(func)
if not self.bindedfuncs:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
self.handlerid = None
def __del__(self):
if self.handlerid:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
# An int in range(1 << len(_modifiers)) represents a combination of modifiers
# (if the least significant bit is on, _modifiers[0] is on, and so on).
# _state_subsets gives for each combination of modifiers, or *state*,
# a list of the states which are a subset of it. This list is ordered by the
# number of modifiers in the state - the most specific state comes first.
_states = range(1 << len(_modifiers))
_state_names = [reduce(lambda x, y: x + y,
[_modifiers[i][0]+'-' for i in range(len(_modifiers))
if (1 << i) & s],
"")
for s in _states]
_state_subsets = map(lambda i: filter(lambda j: not (j & (~i)), _states),
_states)
for l in _state_subsets:
l.sort(lambda a, b, nummod = lambda x: len(filter(lambda i: (1<<i) & x,
range(len(_modifiers)))):
nummod(b) - nummod(a))
# _state_codes gives for each state, the portable code to be passed as mc_state
_state_codes = [reduce(lambda x, y: x | y,
[_modifier_masks[i] for i in range(len(_modifiers))
if (1 << i) & s],
0)
for s in _states]
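# Illustrative example (not part of the original module): on a non-macOS
# platform, state s = 0b101 has Control (bit 0) and Shift (bit 2) set, so
# _state_names[5] == "Control-Shift-" and
# _state_codes[5] == MC_CONTROL | MC_SHIFT.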
class _ComplexBinder:
# This class binds many functions, and only unbinds them when it is deleted.
# self.handlerids is the list of seqs and ids of binded handler functions.
# The binded functions sit in a dictionary of lists of lists, which maps
# a detail (or None) and a state into a list of functions.
# When a new detail is discovered, handlers for all the possible states
# are binded.
def __create_handler(self, lists, mc_type, mc_state):
def handler(event, lists = lists,
mc_type = mc_type, mc_state = mc_state,
ishandlerrunning = self.ishandlerrunning,
doafterhandler = self.doafterhandler):
ishandlerrunning[:] = [True]
event.mc_type = mc_type
event.mc_state = mc_state
wascalled = {}
r = None
for l in lists:
for i in range(len(l)-1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = l[i](event)
if r:
break
if r:
break
ishandlerrunning[:] = []
# Call all functions in doafterhandler and remove them from list
while doafterhandler:
doafterhandler.pop()()
if r:
return r
return handler
def __init__(self, type, widget, widgetinst):
self.type = type
self.typename = _types[type][0]
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = {None: [[] for s in _states]}
self.handlerids = []
# we don't want to change the lists of functions while a handler is
# running - it will mess up the loop and anyway, we usually want the
# change to happen from the next event. So we have a list of functions
# for the handler to run after it finishes calling the binded functions.
# It calls them only once.
# ishandlerrunning is a list. An empty one means no, otherwise - yes.
# this is done so that it would be mutable.
self.ishandlerrunning = []
self.doafterhandler = []
for s in _states:
lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
handler = self.__create_handler(lists, type, _state_codes[s])
seq = '<'+_state_names[s]+self.typename+'>'
self.handlerids.append((seq, self.widget.bind(self.widgetinst,
seq, handler)))
def bind(self, triplet, func):
if not self.bindedfuncs.has_key(triplet[2]):
self.bindedfuncs[triplet[2]] = [[] for s in _states]
for s in _states:
lists = [ self.bindedfuncs[detail][i]
for detail in (triplet[2], None)
for i in _state_subsets[s] ]
handler = self.__create_handler(lists, self.type,
_state_codes[s])
seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
self.handlerids.append((seq, self.widget.bind(self.widgetinst,
seq, handler)))
doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def unbind(self, triplet, func):
doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def __del__(self):
for seq, id in self.handlerids:
self.widget.unbind(self.widgetinst, seq, id)
# define the list of event types to be handled by MultiEvent. the order is
# compatible with the definition of event type constants.
_types = (
("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
("Visibility",),
)
# which binder should be used for every event type?
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name into its number
_type_names = dict([(name, number)
for number in range(len(_types))
for name in _types[number]])
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
"""Get a string which should describe an event sequence. If it is
successfully parsed as one, return a tuple containing the state (as an int),
the event type (as an index of _types), and the detail - None if none, or a
string if there is one. If the parsing is unsuccessful, return None.
"""
if not sequence or sequence[0] != '<' or sequence[-1] != '>':
return None
words = string.split(sequence[1:-1], '-')
modifiers = 0
while words and words[0] in _modifier_names:
modifiers |= 1 << _modifier_names[words[0]]
del words[0]
if words and words[0] in _type_names:
type = _type_names[words[0]]
del words[0]
else:
return None
if _binder_classes[type] is _SimpleBinder:
if modifiers or words:
return None
else:
detail = None
else:
# _ComplexBinder
if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
type_re = _keysym_re
else:
type_re = _button_re
if not words:
detail = None
elif len(words) == 1 and type_re.match(words[0]):
detail = words[0]
else:
return None
return modifiers, type, detail
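# Illustrative examples (not part of the original module), assuming a
# non-macOS platform:
#   _parse_sequence("<Control-Key-a>")  ->  (1, 0, 'a')   # Control bit, KeyPress, detail 'a'
#   _parse_sequence("<FocusIn>")        ->  (0, 12, None) # simple event, no modifiers or detail
#   _parse_sequence("<3>")              ->  None          # unrecognized shortcut form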
def _triplet_to_sequence(triplet):
if triplet[2]:
return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'-'+ \
triplet[2]+'>'
else:
return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'>'
_multicall_dict = {}
def MultiCallCreator(widget):
"""Return a MultiCall class which inherits its methods from the
given widget class (for example, Tkinter.Text). This is used
instead of a templating mechanism.
"""
if widget in _multicall_dict:
return _multicall_dict[widget]
class MultiCall (widget):
assert issubclass(widget, Tkinter.Misc)
def __init__(self, *args, **kwargs):
apply(widget.__init__, (self,)+args, kwargs)
# a dictionary which maps a virtual event to a tuple with:
# 0. the function binded
# 1. a list of triplets - the sequences it is binded to
self.__eventinfo = {}
self.__binders = [_binder_classes[i](i, widget, self)
for i in range(len(_types))]
def bind(self, sequence=None, func=None, add=None):
#print "bind(%s, %s, %s) called." % (sequence, func, add)
if type(sequence) is str and len(sequence) > 2 and \
sequence[:2] == "<<" and sequence[-2:] == ">>":
if sequence in self.__eventinfo:
ei = self.__eventinfo[sequence]
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].unbind(triplet, ei[0])
ei[0] = func
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].bind(triplet, func)
else:
self.__eventinfo[sequence] = [func, []]
return widget.bind(self, sequence, func, add)
def unbind(self, sequence, funcid=None):
if type(sequence) is str and len(sequence) > 2 and \
sequence[:2] == "<<" and sequence[-2:] == ">>" and \
sequence in self.__eventinfo:
func, triplets = self.__eventinfo[sequence]
if func is not None:
for triplet in triplets:
self.__binders[triplet[1]].unbind(triplet, func)
self.__eventinfo[sequence][0] = None
return widget.unbind(self, sequence, funcid)
def event_add(self, virtual, *sequences):
#print "event_add(%s,%s) was called"%(repr(virtual),repr(sequences))
if virtual not in self.__eventinfo:
self.__eventinfo[virtual] = [None, []]
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
#print >> sys.stderr, "Seq. %s was added by Tkinter."%seq
widget.event_add(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].bind(triplet, func)
triplets.append(triplet)
def event_delete(self, virtual, *sequences):
if virtual not in self.__eventinfo:
return
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
#print >> sys.stderr, "Seq. %s was deleted by Tkinter."%seq
widget.event_delete(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].unbind(triplet, func)
triplets.remove(triplet)
def event_info(self, virtual=None):
if virtual is None or virtual not in self.__eventinfo:
return widget.event_info(self, virtual)
else:
return tuple(map(_triplet_to_sequence,
self.__eventinfo[virtual][1])) + \
widget.event_info(self, virtual)
def __del__(self):
for virtual in self.__eventinfo:
func, triplets = self.__eventinfo[virtual]
if func:
for triplet in triplets:
self.__binders[triplet[1]].unbind(triplet, func)
_multicall_dict[widget] = MultiCall
return MultiCall
if __name__ == "__main__":
# Test
root = Tkinter.Tk()
text = MultiCallCreator(Tkinter.Text)(root)
text.pack()
def bindseq(seq, n=[0]):
def handler(event):
print seq
text.bind("<<handler%d>>"%n[0], handler)
text.event_add("<<handler%d>>"%n[0], seq)
n[0] += 1
bindseq("<Key>")
bindseq("<Control-Key>")
bindseq("<Alt-Key-a>")
bindseq("<Control-Key-a>")
bindseq("<Alt-Control-Key-a>")
bindseq("<Key-b>")
bindseq("<Control-Button-1>")
bindseq("<Alt-Button-1>")
bindseq("<FocusOut>")
bindseq("<Enter>")
bindseq("<Leave>")
root.mainloop()
| lgpl-2.1 |
skycucumber/restful | python/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
| gpl-2.0 |
kernevil/samba | python/samba/kcc/__init__.py | 1 | 115581 | # define the KCC object
#
# Copyright (C) Dave Craft 2011
# Copyright (C) Andrew Bartlett 2015
#
# Andrew Bartlett's alleged work performed by his underlings Douglas
# Bagnall and Garming Sam.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import random
import uuid
import itertools
from samba import unix2nttime, nttime2unix
from samba import ldb, dsdb, drs_utils
from samba.auth import system_session
from samba.samdb import SamDB
from samba.dcerpc import drsuapi, misc
from samba.kcc.kcc_utils import Site, Partition, Transport, SiteLink
from samba.kcc.kcc_utils import NCReplica, NCType, nctype_lut, GraphNode
from samba.kcc.kcc_utils import RepsFromTo, KCCError, KCCFailedObject
from samba.kcc.graph import convert_schedule_to_repltimes
from samba.ndr import ndr_pack
from samba.kcc.graph_utils import verify_and_dot
from samba.kcc import ldif_import_export
from samba.kcc.graph import setup_graph, get_spanning_tree_edges
from samba.kcc.graph import Vertex
from samba.kcc.debug import DEBUG, DEBUG_FN, logger
from samba.kcc import debug
from samba.common import cmp
def sort_dsa_by_gc_and_guid(dsa1, dsa2):
"""Helper to sort DSAs by guid global catalog status
GC DSAs come before non-GC DSAs, other than that, the guids are
sorted in NDR form.
:param dsa1: A DSA object
:param dsa2: Another DSA
:return: -1, 0, or 1, indicating sort order.
"""
if dsa1.is_gc() and not dsa2.is_gc():
return -1
if not dsa1.is_gc() and dsa2.is_gc():
return +1
return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
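# Illustrative usage (not part of the original source): this is a Python 2
# cmp-style comparator, used later in this module as
#   bhs.sort(sort_dsa_by_gc_and_guid)
# so that global catalog DSAs come first, then ascending NDR-packed GUID.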
def is_smtp_replication_available():
"""Can the KCC use SMTP replication?
Currently always returns false because Samba doesn't implement
SMTP transfer for NC changes between DCs.
:return: Boolean (always False)
"""
return False
class KCC(object):
"""The Knowledge Consistency Checker class.
A container for objects and methods allowing a run of the KCC. Produces a
set of connections in the samdb for which the Distributed Replication
Service can then utilize to replicate naming contexts
:param unix_now: The putative current time in seconds since 1970.
:param readonly: Don't write to the database.
:param verify: Check topological invariants for the generated graphs
:param debug: Write verbosely to stderr.
:param dot_file_dir: write diagnostic Graphviz files in this directory
"""
def __init__(self, unix_now, readonly=False, verify=False, debug=False,
dot_file_dir=None):
"""Initializes the partitions class which can hold
our local DCs partitions or all the partitions in
the forest
"""
self.part_table = {} # partition objects
self.site_table = {}
self.ip_transport = None
self.sitelink_table = {}
self.dsa_by_dnstr = {}
self.dsa_by_guid = {}
self.get_dsa_by_guidstr = self.dsa_by_guid.get
self.get_dsa = self.dsa_by_dnstr.get
# TODO: These should be backed by a 'permanent' store so that when
# calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
# the failure information can be returned
self.kcc_failed_links = {}
self.kcc_failed_connections = set()
# Used in inter-site topology computation. A list
# of connections (by NTDSConnection object) that are
# to be kept when pruning un-needed NTDS Connections
self.kept_connections = set()
self.my_dsa_dnstr = None # My dsa DN
self.my_dsa = None # My dsa object
self.my_site_dnstr = None
self.my_site = None
self.samdb = None
self.unix_now = unix_now
self.nt_now = unix2nttime(unix_now)
self.readonly = readonly
self.verify = verify
self.debug = debug
self.dot_file_dir = dot_file_dir
def load_ip_transport(self):
"""Loads the inter-site transport objects for Sites
:return: None
:raise KCCError: if no IP transport is found
"""
try:
res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
self.samdb.get_config_basedn(),
scope=ldb.SCOPE_SUBTREE,
expression="(objectClass=interSiteTransport)")
except ldb.LdbError as e2:
(enum, estr) = e2.args
raise KCCError("Unable to find inter-site transports - (%s)" %
estr)
for msg in res:
dnstr = str(msg.dn)
transport = Transport(dnstr)
transport.load_transport(self.samdb)
if transport.name == 'IP':
self.ip_transport = transport
elif transport.name == 'SMTP':
logger.debug("Samba KCC is ignoring the obsolete "
"SMTP transport.")
else:
logger.warning("Samba KCC does not support the transport "
"called %r." % (transport.name,))
if self.ip_transport is None:
raise KCCError("there doesn't seem to be an IP transport")
def load_all_sitelinks(self):
"""Loads the inter-site siteLink objects
:return: None
:raise KCCError: if site-links aren't found
"""
try:
res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
self.samdb.get_config_basedn(),
scope=ldb.SCOPE_SUBTREE,
expression="(objectClass=siteLink)")
except ldb.LdbError as e3:
(enum, estr) = e3.args
raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)
for msg in res:
dnstr = str(msg.dn)
# already loaded
if dnstr in self.sitelink_table:
continue
sitelink = SiteLink(dnstr)
sitelink.load_sitelink(self.samdb)
# Assign this siteLink to table
# and index by dn
self.sitelink_table[dnstr] = sitelink
def load_site(self, dn_str):
"""Helper for load_my_site and load_all_sites.
Put all the site's DSAs into the KCC indices.
:param dn_str: a site dn_str
:return: the Site object pertaining to the dn_str
"""
site = Site(dn_str, self.unix_now)
site.load_site(self.samdb)
# We avoid replacing the site with an identical copy in case
# somewhere else has a reference to the old one, which would
# lead to all manner of confusion and chaos.
guid = str(site.site_guid)
if guid not in self.site_table:
self.site_table[guid] = site
self.dsa_by_dnstr.update(site.dsa_table)
self.dsa_by_guid.update((str(x.dsa_guid), x)
for x in site.dsa_table.values())
return self.site_table[guid]
def load_my_site(self):
"""Load the Site object for the local DSA.
:return: None
"""
self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % (
self.samdb.server_site_name(),
self.samdb.get_config_basedn()))
self.my_site = self.load_site(self.my_site_dnstr)
def load_all_sites(self):
"""Discover all sites and create Site objects.
:return: None
:raise: KCCError if sites can't be found
"""
try:
res = self.samdb.search("CN=Sites,%s" %
self.samdb.get_config_basedn(),
scope=ldb.SCOPE_SUBTREE,
expression="(objectClass=site)")
except ldb.LdbError as e4:
(enum, estr) = e4.args
raise KCCError("Unable to find sites - (%s)" % estr)
for msg in res:
sitestr = str(msg.dn)
self.load_site(sitestr)
def load_my_dsa(self):
"""Discover my nTDSDSA dn thru the rootDSE entry
:return: None
:raise: KCCError if DSA can't be found
"""
dn_query = "<GUID=%s>" % self.samdb.get_ntds_GUID()
dn = ldb.Dn(self.samdb, dn_query)
try:
res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
attrs=["objectGUID"])
except ldb.LdbError as e5:
(enum, estr) = e5.args
DEBUG_FN("Search for dn '%s' [from %s] failed: %s. "
"This typically happens in --importldif mode due "
"to lack of module support." % (dn, dn_query, estr))
try:
# We work around the failure above by looking at the
# dsServiceName that was put in the fake rootdse by
# the --exportldif, rather than the
# samdb.get_ntds_GUID(). The disadvantage is that this
# mode requires we modify the @ROOTDSE dnq to support
# --forced-local-dsa
service_name_res = self.samdb.search(base="",
scope=ldb.SCOPE_BASE,
attrs=["dsServiceName"])
dn = ldb.Dn(self.samdb,
service_name_res[0]["dsServiceName"][0].decode('utf8'))
res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
attrs=["objectGUID"])
except ldb.LdbError as e:
(enum, estr) = e.args
raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)
if len(res) != 1:
raise KCCError("Unable to find my nTDSDSA at %s" %
dn.extended_str())
ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
raise KCCError("Did not find the GUID we expected,"
" perhaps due to --importldif")
self.my_dsa_dnstr = str(res[0].dn)
self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
if self.my_dsa_dnstr not in self.dsa_by_dnstr:
debug.DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsas_by_dnstr:"
" it must be RODC.\n"
"Let's add it, because my_dsa is special!"
"\n(likewise for self.dsa_by_guid)" %
self.my_dsa_dnstr)
self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa
def load_all_partitions(self):
"""Discover and load all partitions.
Each NC is inserted into the part_table by partition
dn string (not the nCName dn string)
:return: None
:raise: KCCError if partitions can't be found
"""
try:
res = self.samdb.search("CN=Partitions,%s" %
self.samdb.get_config_basedn(),
scope=ldb.SCOPE_SUBTREE,
expression="(objectClass=crossRef)")
except ldb.LdbError as e6:
(enum, estr) = e6.args
raise KCCError("Unable to find partitions - (%s)" % estr)
for msg in res:
partstr = str(msg.dn)
# already loaded
if partstr in self.part_table:
continue
part = Partition(partstr)
part.load_partition(self.samdb)
self.part_table[partstr] = part
def refresh_failed_links_connections(self, ping=None):
"""Ensure the failed links list is up to date
Based on MS-ADTS 6.2.2.1
:param ping: An oracle function of remote site availability
:return: None
"""
# LINKS: Refresh failed links
self.kcc_failed_links = {}
current, needed = self.my_dsa.get_rep_tables()
for replica in current.values():
# For every possible connection to replicate
for reps_from in replica.rep_repsFrom:
failure_count = reps_from.consecutive_sync_failures
if failure_count <= 0:
continue
dsa_guid = str(reps_from.source_dsa_obj_guid)
time_first_failure = reps_from.last_success
last_result = reps_from.last_attempt
dns_name = reps_from.dns_name1
f = self.kcc_failed_links.get(dsa_guid)
if f is None:
f = KCCFailedObject(dsa_guid, failure_count,
time_first_failure, last_result,
dns_name)
self.kcc_failed_links[dsa_guid] = f
else:
f.failure_count = max(f.failure_count, failure_count)
f.time_first_failure = min(f.time_first_failure,
time_first_failure)
f.last_result = last_result
# CONNECTIONS: Refresh failed connections
restore_connections = set()
if ping is not None:
DEBUG("refresh_failed_links: checking if links are still down")
for connection in self.kcc_failed_connections:
if ping(connection.dns_name):
# Failed connection is no longer failing
restore_connections.add(connection)
else:
connection.failure_count += 1
else:
DEBUG("refresh_failed_links: not checking live links because we\n"
"weren't asked to --attempt-live-connections")
# Remove the restored connections from the failed connections
self.kcc_failed_connections.difference_update(restore_connections)
def is_stale_link_connection(self, target_dsa):
"""Check whether a link to a remote DSA is stale
Used in MS-ADTS 6.2.2.2 Intrasite Connection Creation
Returns True if the remote seems to have been down for at
least two hours, otherwise False.
:param target_dsa: the remote DSA object
:return: True if link is stale, otherwise False
"""
failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
if failed_link:
# failure_count should be > 0, but check anyways
if failed_link.failure_count > 0:
unix_first_failure = \
nttime2unix(failed_link.time_first_failure)
# TODO guard against future
if unix_first_failure > self.unix_now:
logger.error("The last success time attribute for \
repsFrom is in the future!")
# Perform calculation in seconds
if (self.unix_now - unix_first_failure) > 60 * 60 * 2:
return True
# TODO connections.
# We have checked failed *links*, but we also need to check
# *connections*
return False
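# Illustrative example (not part of the original source): with
# self.unix_now = 1000000 and a failed link whose first failure maps to
# unix time 992000, the difference (8000s) exceeds the two-hour threshold
# (7200s), so the link to the target DSA is reported stale.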
# TODO: This should be backed by some form of local database
def remove_unneeded_failed_links_connections(self):
# Remove all tuples in kcc_failed_links where failure count = 0
# In this implementation, this should never happen.
# Remove all connections which were not used this run or connections
# that became active during this run.
pass
def _ensure_connections_are_loaded(self, connections):
"""Load or fake-load NTDSConnections lacking GUIDs
New connections don't have GUIDs and created times which are
needed for sorting. If we're in read-only mode, we make fake
GUIDs, otherwise we ask SamDB to do it for us.
:param connections: an iterable of NTDSConnection objects.
:return: None
"""
for cn_conn in connections:
if cn_conn.guid is None:
if self.readonly:
cn_conn.guid = misc.GUID(str(uuid.uuid4()))
cn_conn.whenCreated = self.nt_now
else:
cn_conn.load_connection(self.samdb)
def _mark_broken_ntdsconn(self):
"""Find NTDS Connections that lack a remote
I'm not sure how they appear. Let's be rid of them by marking
them with the to_be_deleted attribute.
:return: None
"""
for cn_conn in self.my_dsa.connect_table.values():
s_dnstr = cn_conn.get_from_dnstr()
if s_dnstr is None:
DEBUG_FN("%s has phantom connection %s" % (self.my_dsa,
cn_conn))
cn_conn.to_be_deleted = True
def _mark_unneeded_local_ntdsconn(self):
"""Find unneeded intrasite NTDS Connections for removal
Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections.
Every DC removes its own unnecessary intrasite connections.
This function tags them with the to_be_deleted attribute.
:return: None
"""
# XXX should an RODC be regarded as same site? It isn't part
# of the intrasite ring.
if self.my_site.is_cleanup_ntdsconn_disabled():
DEBUG_FN("not doing ntdsconn cleanup for site %s, "
"because it is disabled" % self.my_site)
return
mydsa = self.my_dsa
try:
self._ensure_connections_are_loaded(mydsa.connect_table.values())
except KCCError:
# RODC never actually added any connections to begin with
if mydsa.is_ro():
return
local_connections = []
for cn_conn in mydsa.connect_table.values():
s_dnstr = cn_conn.get_from_dnstr()
if s_dnstr in self.my_site.dsa_table:
removable = not (cn_conn.is_generated() or
cn_conn.is_rodc_topology())
packed_guid = ndr_pack(cn_conn.guid)
local_connections.append((cn_conn, s_dnstr,
packed_guid, removable))
# Avoid "ValueError: r cannot be bigger than the iterable" in
# for a, b in itertools.permutations(local_connections, 2):
if (len(local_connections) < 2):
return
for a, b in itertools.permutations(local_connections, 2):
cn_conn, s_dnstr, packed_guid, removable = a
cn_conn2, s_dnstr2, packed_guid2, removable2 = b
if (removable and
s_dnstr == s_dnstr2 and
cn_conn.whenCreated < cn_conn2.whenCreated or
(cn_conn.whenCreated == cn_conn2.whenCreated and
packed_guid < packed_guid2)):
cn_conn.to_be_deleted = True
def _mark_unneeded_intersite_ntdsconn(self):
"""find unneeded intersite NTDS Connections for removal
Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections. The
intersite topology generator removes links for all DCs in its
site. Here we just tag them with the to_be_deleted attribute.
:return: None
"""
# TODO Figure out how best to handle the RODC case
# The RODC is ISTG, but shouldn't act on anyone's behalf.
if self.my_dsa.is_ro():
return
# Find the intersite connections
local_dsas = self.my_site.dsa_table
connections_and_dsas = []
for dsa in local_dsas.values():
for cn in dsa.connect_table.values():
if cn.to_be_deleted:
continue
s_dnstr = cn.get_from_dnstr()
if s_dnstr is None:
continue
if s_dnstr not in local_dsas:
from_dsa = self.get_dsa(s_dnstr)
# Samba ONLY: ISTG removes connections to dead DCs
if from_dsa is None or '\\0ADEL' in s_dnstr:
logger.info("DSA appears deleted, removing connection %s"
% s_dnstr)
cn.to_be_deleted = True
continue
connections_and_dsas.append((cn, dsa, from_dsa))
self._ensure_connections_are_loaded(x[0] for x in connections_and_dsas)
for cn, to_dsa, from_dsa in connections_and_dsas:
if not cn.is_generated() or cn.is_rodc_topology():
continue
# If the connection is in the kept_connections list, we
# only remove it if an endpoint seems down.
if (cn in self.kept_connections and
not (self.is_bridgehead_failed(to_dsa, True) or
self.is_bridgehead_failed(from_dsa, True))):
continue
# this one is broken and might be superseded by another.
# But which other? Let's just say another link to the same
# site can supersede.
from_dnstr = from_dsa.dsa_dnstr
for site in self.site_table.values():
if from_dnstr in site.rw_dsa_table:
for cn2, to_dsa2, from_dsa2 in connections_and_dsas:
if (cn is not cn2 and
from_dsa2 in site.rw_dsa_table):
cn.to_be_deleted = True
def _commit_changes(self, dsa):
if dsa.is_ro() or self.readonly:
for connect in dsa.connect_table.values():
if connect.to_be_deleted:
logger.info("TO BE DELETED:\n%s" % connect)
if connect.to_be_added:
logger.info("TO BE ADDED:\n%s" % connect)
if connect.to_be_modified:
logger.info("TO BE MODIFIED:\n%s" % connect)
# Perform deletion from our tables but perform
# no database modification
dsa.commit_connections(self.samdb, ro=True)
else:
# Commit any modified connections
dsa.commit_connections(self.samdb)
def remove_unneeded_ntdsconn(self, all_connected):
"""Remove unneeded NTDS Connections once topology is calculated
Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections
:param all_connected: indicates whether all sites are connected
:return: None
"""
self._mark_broken_ntdsconn()
self._mark_unneeded_local_ntdsconn()
# if we are not the istg, we're done!
# if we are the istg, but all_connected is False, we also do nothing.
if self.my_dsa.is_istg() and all_connected:
self._mark_unneeded_intersite_ntdsconn()
for dsa in self.my_site.dsa_table.values():
self._commit_changes(dsa)
def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
"""Update an repsFrom object if required.
Part of MS-ADTS 6.2.2.5.
Update t_repsFrom if necessary to satisfy requirements. Such
updates are typically required when the IDL_DRSGetNCChanges
server has moved from one site to another--for example, to
enable compression when the server is moved from the
client's site to another site.
The repsFrom.update_flags bit field may be modified
auto-magically if any changes are made here. See
kcc_utils.RepsFromTo for gory details.
:param n_rep: NC replica we need
:param t_repsFrom: repsFrom tuple to modify
:param s_rep: NC replica at source DSA
:param s_dsa: source DSA
:param cn_conn: Local DSA NTDSConnection child
:return: None
"""
s_dnstr = s_dsa.dsa_dnstr
same_site = s_dnstr in self.my_site.dsa_table
# if schedule doesn't match then update and modify
times = convert_schedule_to_repltimes(cn_conn.schedule)
if times != t_repsFrom.schedule:
t_repsFrom.schedule = times
# Bit DRS_ADD_REF is set in replicaFlags unconditionally
# Samba ONLY:
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_ADD_REF) == 0x0):
t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_ADD_REF
# Bit DRS_PER_SYNC is set in replicaFlags if and only
# if nTDSConnection schedule has a value v that specifies
# scheduled replication is to be performed at least once
# per week.
if cn_conn.is_schedule_minimum_once_per_week():
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC
# Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
# if the source DSA and the local DC's nTDSDSA object are
# in the same site or source dsa is the FSMO role owner
# of one or more FSMO roles in the NC replica.
if same_site or n_rep.is_fsmo_role_owner(s_dnstr):
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC
# If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
# cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
# if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
# cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
# t.replicaFlags if and only if s and the local DC's
# nTDSDSA object are in different sites.
if ((cn_conn.options &
dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):
if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
# WARNING
#
# it LOOKS as if this next test is a bit silly: it
# checks the flag then sets it if it not set; the same
# effect could be achieved by unconditionally setting
# it. But in fact the repsFrom object has special
# magic attached to it, and altering replica_flags has
# side-effects. That is bad in my opinion, but there
# you go.
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
t_repsFrom.replica_flags |= \
drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
elif not same_site:
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
# Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
# and only if s and the local DC's nTDSDSA object are
# not in the same site and the
# NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
# clear in cn!options
if (not same_site and
(cn_conn.options &
dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION
# Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
# if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC
# Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
# set in t.replicaFlags if and only if cn!enabledConnection = false.
if not cn_conn.is_enabled():
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
t_repsFrom.replica_flags |= \
drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
t_repsFrom.replica_flags |= \
drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
# If s and the local DC's nTDSDSA object are in the same site,
# cn!transportType has no value, or the RDN of cn!transportType
# is CN=IP:
#
# Bit DRS_MAIL_REP in t.replicaFlags is clear.
#
# t.uuidTransport = NULL GUID.
#
# t.uuidDsa = The GUID-based DNS name of s.
#
# Otherwise:
#
# Bit DRS_MAIL_REP in t.replicaFlags is set.
#
# If x is the object with dsname cn!transportType,
# t.uuidTransport = x!objectGUID.
#
# Let a be the attribute identified by
# x!transportAddressAttribute. If a is
# the dNSHostName attribute, t.uuidDsa = the GUID-based
# DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
#
# It appears that the first statement i.e.
#
# "If s and the local DC's nTDSDSA object are in the same
# site, cn!transportType has no value, or the RDN of
# cn!transportType is CN=IP:"
#
# could be a slightly tighter statement if it had an "or"
# between each condition. I believe this should
# be interpreted as:
#
# IF (same-site) OR (no-value) OR (type-ip)
#
# because IP should be the primary transport mechanism
# (even in inter-site) and the absence of the transportType
# attribute should always imply IP no matter if it's multi-site
#
# NOTE MS-TECH INCORRECT:
#
# All indications point to these statements above being
# incorrectly stated:
#
# t.uuidDsa = The GUID-based DNS name of s.
#
# Let a be the attribute identified by
# x!transportAddressAttribute. If a is
# the dNSHostName attribute, t.uuidDsa = the GUID-based
# DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
#
# because the uuidDSA is a GUID and not a GUID-based DNS
# name. Nor can uuidDsa hold (s!parent)!a if not
# dNSHostName. What should have been said is:
#
# t.naDsa = The GUID-based DNS name of s
#
# That would also be correct if transportAddressAttribute
# were "mailAddress" because (naDsa) can also correctly
# hold the SMTP ISM service address.
#
nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())
if ((t_repsFrom.replica_flags &
drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP
t_repsFrom.transport_guid = misc.GUID()
# See (NOTE MS-TECH INCORRECT) above
# NOTE: it looks like these conditionals are pointless,
# because the state will end up as `t_repsFrom.dns_name1 ==
# nastr` in either case, BUT the repsFrom thing is magic and
# assigning to it alters some flags. So we try not to update
# it unless necessary.
if t_repsFrom.dns_name1 != nastr:
t_repsFrom.dns_name1 = nastr
if t_repsFrom.version > 0x1 and t_repsFrom.dns_name2 != nastr:
t_repsFrom.dns_name2 = nastr
if t_repsFrom.is_modified():
DEBUG_FN("modify_repsFrom(): %s" % t_repsFrom)
def get_dsa_for_implied_replica(self, n_rep, cn_conn):
"""If a connection imply a replica, find the relevant DSA
Given a NC replica and NTDS Connection, determine if the
connection implies a repsFrom tuple should be present from the
source DSA listed in the connection to the naming context. If
it should be, return the DSA; otherwise return None.
Based on part of MS-ADTS 6.2.2.5
:param n_rep: NC replica
:param cn_conn: NTDS Connection
:return: source DSA or None
"""
# XXX different conditions for "implies" than MS-ADTS 6.2.2
# preamble.
# It boils down to: we want an enabled, non-FRS connections to
# a valid remote DSA with a non-RO replica corresponding to
# n_rep.
if not cn_conn.is_enabled() or cn_conn.is_rodc_topology():
return None
s_dnstr = cn_conn.get_from_dnstr()
s_dsa = self.get_dsa(s_dnstr)
# No DSA matching this source DN string?
if s_dsa is None:
return None
s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
if (s_rep is not None and
s_rep.is_present() and
(not s_rep.is_ro() or n_rep.is_partial())):
return s_dsa
return None
def translate_ntdsconn(self, current_dsa=None):
"""Adjust repsFrom to match NTDSConnections
This function adjusts values of repsFrom abstract attributes of NC
replicas on the local DC to match those implied by
nTDSConnection objects.
Based on [MS-ADTS] 6.2.2.5
:param current_dsa: optional DSA on whose behalf we are acting.
:return: None
"""
ro = False
if current_dsa is None:
current_dsa = self.my_dsa
if current_dsa.is_ro():
ro = True
if current_dsa.is_translate_ntdsconn_disabled():
DEBUG_FN("skipping translate_ntdsconn() "
"because disabling flag is set")
return
DEBUG_FN("translate_ntdsconn(): enter")
current_rep_table, needed_rep_table = current_dsa.get_rep_tables()
# Filled in with replicas we currently have that need deleting
delete_reps = set()
# We're using the MS notation names here to allow
# correlation back to the published algorithm.
#
# n_rep - NC replica (n)
# t_repsFrom - tuple (t) in n!repsFrom
# s_dsa - Source DSA of the replica. Defined as nTDSDSA
# object (s) such that (s!objectGUID = t.uuidDsa)
# In our IDL representation of repsFrom the (uuidDsa)
# attribute is called (source_dsa_obj_guid)
# cn_conn - (cn) is nTDSConnection object and child of the local
# DC's nTDSDSA object and (cn!fromServer = s)
# s_rep - source DSA replica of n
#
# If we have the replica and its not needed
# then we add it to the "to be deleted" list.
for dnstr in current_rep_table:
# If we're on the RODC, hardcode the update flags
if ro:
c_rep = current_rep_table[dnstr]
c_rep.load_repsFrom(self.samdb)
for t_repsFrom in c_rep.rep_repsFrom:
replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
drsuapi.DRSUAPI_DRS_PER_SYNC |
drsuapi.DRSUAPI_DRS_ADD_REF |
drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING |
drsuapi.DRSUAPI_DRS_NONGC_RO_REP)
if t_repsFrom.replica_flags != replica_flags:
t_repsFrom.replica_flags = replica_flags
c_rep.commit_repsFrom(self.samdb, ro=self.readonly)
else:
if dnstr not in needed_rep_table:
delete_reps.add(dnstr)
DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
len(needed_rep_table), len(delete_reps)))
if delete_reps:
# TODO Must delete repsFrom/repsTo for these replicas
DEBUG('deleting these reps: %s' % delete_reps)
for dnstr in delete_reps:
del current_rep_table[dnstr]
# HANDLE REPS-FROM
#
# Now perform the scan of replicas we'll need
# and compare any current repsFrom against the
# connections
for n_rep in needed_rep_table.values():
# load any repsFrom and fsmo roles as we'll
# need them during connection translation
n_rep.load_repsFrom(self.samdb)
n_rep.load_fsmo_roles(self.samdb)
# Loop thru the existing repsFrom tuples (if any)
# XXX This is a list and could contain duplicates
# (multiple load_repsFrom calls)
for t_repsFrom in n_rep.rep_repsFrom:
# for each tuple t in n!repsFrom, let s be the nTDSDSA
# object such that s!objectGUID = t.uuidDsa
guidstr = str(t_repsFrom.source_dsa_obj_guid)
s_dsa = self.get_dsa_by_guidstr(guidstr)
# Source dsa is gone from config (strange)
# so cleanup stale repsFrom for unlisted DSA
if s_dsa is None:
logger.warning("repsFrom source DSA guid (%s) not found" %
guidstr)
t_repsFrom.to_be_deleted = True
continue
# Find the connection that this repsFrom would use. If
# there isn't a good one (i.e. non-RODC_TOPOLOGY,
# meaning non-FRS), we delete the repsFrom.
s_dnstr = s_dsa.dsa_dnstr
connections = current_dsa.get_connection_by_from_dnstr(s_dnstr)
for cn_conn in connections:
if not cn_conn.is_rodc_topology():
break
else:
# no break means no non-rodc_topology connection exists
t_repsFrom.to_be_deleted = True
continue
# KCC removes this repsFrom tuple if any of the following
# is true:
# No NC replica of the NC "is present" on DSA that
# would be source of replica
#
# A writable replica of the NC "should be present" on
# the local DC, but a partial replica "is present" on
# the source DSA
s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
if s_rep is None or not s_rep.is_present() or \
(not n_rep.is_ro() and s_rep.is_partial()):
t_repsFrom.to_be_deleted = True
continue
# If the KCC did not remove t from n!repsFrom, it updates t
self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
# Loop thru connections and add implied repsFrom tuples
# for each NTDSConnection under our local DSA if the
# repsFrom is not already present
for cn_conn in current_dsa.connect_table.values():
s_dsa = self.get_dsa_for_implied_replica(n_rep, cn_conn)
if s_dsa is None:
continue
# Loop thru the existing repsFrom tuples (if any) and
# if we already have a tuple for this connection then
# no need to proceed to add. It will have been changed
# to have the correct attributes above
for t_repsFrom in n_rep.rep_repsFrom:
guidstr = str(t_repsFrom.source_dsa_obj_guid)
if s_dsa is self.get_dsa_by_guidstr(guidstr):
s_dsa = None
break
if s_dsa is None:
continue
# Create a new RepsFromTo and proceed to modify
# it according to specification
t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
# Add to our NC repsFrom as this is newly computed
if t_repsFrom.is_modified():
n_rep.rep_repsFrom.append(t_repsFrom)
if self.readonly or ro:
# Display any to be deleted or modified repsFrom
text = n_rep.dumpstr_to_be_deleted()
if text:
logger.info("TO BE DELETED:\n%s" % text)
text = n_rep.dumpstr_to_be_modified()
if text:
logger.info("TO BE MODIFIED:\n%s" % text)
# Perform deletion from our tables but perform
# no database modification
n_rep.commit_repsFrom(self.samdb, ro=True)
else:
# Commit any modified repsFrom to the NC replica
n_rep.commit_repsFrom(self.samdb)
# HANDLE REPS-TO:
#
# Now perform the scan of replicas we'll need
# and compare any current repsTo against the
# connections
# RODC should never push to anybody (should we check this?)
if ro:
return
for n_rep in needed_rep_table.values():
# load any repsTo and fsmo roles as we'll
# need them during connection translation
n_rep.load_repsTo(self.samdb)
# Loop thru the existing repsTo tuples (if any)
# XXX This is a list and could contain duplicates
# (multiple load_repsTo calls)
for t_repsTo in n_rep.rep_repsTo:
# for each tuple t in n!repsTo, let s be the nTDSDSA
# object such that s!objectGUID = t.uuidDsa
guidstr = str(t_repsTo.source_dsa_obj_guid)
s_dsa = self.get_dsa_by_guidstr(guidstr)
# Source dsa is gone from config (strange)
# so cleanup stale repsTo for unlisted DSA
if s_dsa is None:
logger.warning("repsTo source DSA guid (%s) not found" %
guidstr)
t_repsTo.to_be_deleted = True
continue
# Find the connection that this repsTo would use. If
# there isn't a good one (i.e. non-RODC_TOPOLOGY,
# meaning non-FRS), we delete the repsTo.
s_dnstr = s_dsa.dsa_dnstr
if '\\0ADEL' in s_dnstr:
logger.warning("repsTo source DSA guid (%s) appears deleted" %
guidstr)
t_repsTo.to_be_deleted = True
continue
connections = s_dsa.get_connection_by_from_dnstr(self.my_dsa_dnstr)
if len(connections) > 0:
# Then this repsTo is tentatively valid
continue
else:
# There is no plausible connection for this repsTo
t_repsTo.to_be_deleted = True
if self.readonly:
# Display any to be deleted or modified repsTo
for rt in n_rep.rep_repsTo:
if rt.to_be_deleted:
logger.info("REMOVING REPS-TO: %s" % rt)
# Perform deletion from our tables but perform
# no database modification
n_rep.commit_repsTo(self.samdb, ro=True)
else:
# Commit any modified repsTo to the NC replica
n_rep.commit_repsTo(self.samdb)
# TODO Remove any duplicate repsTo values. This should never happen in
# any normal situations.
def merge_failed_links(self, ping=None):
"""Merge of kCCFailedLinks and kCCFailedLinks from bridgeheads.
The KCC on a writable DC attempts to merge the link and connection
failure information from bridgehead DCs in its own site to help it
identify failed bridgehead DCs.
Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and kCCFailedConnections
from Bridgeheads"
:param ping: An oracle of current bridgehead availability
:return: None
"""
# 1. Queries every bridgehead server in your site (other than yourself)
# 2. For every ntDSConnection that references a server in a different
# site merge all the failure info
#
# XXX - not implemented yet
if ping is not None:
debug.DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
else:
DEBUG_FN("skipping merge_failed_links() because it requires "
"real network connections\n"
"and we weren't asked to --attempt-live-connections")
def setup_graph(self, part):
"""Set up an intersite graph
An intersite graph has a Vertex for each site object, a
MultiEdge for each SiteLink object, and a MutliEdgeSet for
each siteLinkBridge object (or implied siteLinkBridge). It
reflects the intersite topology in a slightly more abstract
graph form.
Roughly corresponds to MS-ADTS 6.2.2.3.4.3
:param part: a Partition object
:returns: an InterSiteGraph object
"""
# If 'Bridge all site links' is enabled and Win2k3 bridges required
# is not set
# NTDSTRANSPORT_OPT_BRIDGES_REQUIRED 0x00000002
# No documentation for this however, ntdsapi.h appears to have:
# NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED = 0x00001000
bridges_required = self.my_site.site_options & 0x00001002 != 0
transport_guid = str(self.ip_transport.guid)
g = setup_graph(part, self.site_table, transport_guid,
self.sitelink_table, bridges_required)
if self.verify or self.dot_file_dir is not None:
dot_edges = []
for edge in g.edges:
for a, b in itertools.combinations(edge.vertices, 2):
dot_edges.append((a.site.site_dnstr, b.site.site_dnstr))
verify_properties = ()
name = 'site_edges_%s' % part.partstr
verify_and_dot(name, dot_edges, directed=False,
label=self.my_dsa_dnstr,
properties=verify_properties, debug=DEBUG,
verify=self.verify,
dot_file_dir=self.dot_file_dir)
return g
def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
"""Get a bridghead DC for a site.
Part of MS-ADTS 6.2.2.3.4.4
:param site: site object representing for which a bridgehead
DC is desired.
:param part: crossRef for NC to replicate.
:param transport: interSiteTransport object for replication
traffic.
:param partial_ok: True if a DC containing a partial
replica or a full replica will suffice, False if only
a full replica will suffice.
:param detect_failed: True to detect failed DCs and route
replication traffic around them, False to assume no DC
has failed.
:return: dsa object for the bridgehead DC or None
"""
bhs = self.get_all_bridgeheads(site, part, transport,
partial_ok, detect_failed)
if not bhs:
debug.DEBUG_MAGENTA("get_bridgehead FAILED:\nsitedn = %s" %
site.site_dnstr)
return None
debug.DEBUG_GREEN("get_bridgehead:\n\tsitedn = %s\n\tbhdn = %s" %
(site.site_dnstr, bhs[0].dsa_dnstr))
return bhs[0]
def get_all_bridgeheads(self, site, part, transport,
partial_ok, detect_failed):
"""Get all bridghead DCs on a site satisfying the given criteria
Part of MS-ADTS 6.2.2.3.4.4
:param site: site object representing the site for which
bridgehead DCs are desired.
:param part: partition for NC to replicate.
:param transport: interSiteTransport object for
replication traffic.
:param partial_ok: True if a DC containing a partial
replica or a full replica will suffice, False if
only a full replica will suffice.
:param detect_failed: True to detect failed DCs and route
replication traffic around them, FALSE to assume
no DC has failed.
:return: list of dsa object for available bridgehead DCs
"""
bhs = []
if transport.name != "IP":
raise KCCError("get_all_bridgeheads has run into a "
"non-IP transport! %r"
% (transport.name,))
DEBUG_FN(site.rw_dsa_table)
for dsa in site.rw_dsa_table.values():
pdnstr = dsa.get_parent_dnstr()
# IF t!bridgeheadServerListBL has one or more values and
# t!bridgeheadServerListBL does not contain a reference
# to the parent object of dc then skip dc
if ((len(transport.bridgehead_list) != 0 and
pdnstr not in transport.bridgehead_list)):
continue
# IF dc is in the same site as the local DC
# IF a replica of cr!nCName is not in the set of NC replicas
# that "should be present" on dc or a partial replica of the
# NC "should be present" but partialReplicasOkay = FALSE
# Skip dc
if self.my_site.same_site(dsa):
needed, ro, partial = part.should_be_present(dsa)
if not needed or (partial and not partial_ok):
continue
rep = dsa.get_current_replica(part.nc_dnstr)
# ELSE
# IF an NC replica of cr!nCName is not in the set of NC
# replicas that "are present" on dc or a partial replica of
# the NC "is present" but partialReplicasOkay = FALSE
# Skip dc
else:
rep = dsa.get_current_replica(part.nc_dnstr)
if rep is None or (rep.is_partial() and not partial_ok):
continue
# IF AmIRODC() and cr!nCName corresponds to default NC then
# Let dsaobj be the nTDSDSA object of the dc
# IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
# Skip dc
if self.my_dsa.is_ro() and rep is not None and rep.is_default():
if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
continue
# IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
# Skip dc
if self.is_bridgehead_failed(dsa, detect_failed):
DEBUG("bridgehead is failed")
continue
DEBUG_FN("found a bridgehead: %s" % dsa.dsa_dnstr)
bhs.append(dsa)
# IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
# s!options
# SORT bhs such that all GC servers precede DCs that are not GC
# servers, and otherwise by ascending objectGUID
# ELSE
# SORT bhs in a random order
if site.is_random_bridgehead_disabled():
bhs.sort(sort_dsa_by_gc_and_guid)
else:
random.shuffle(bhs)
debug.DEBUG_YELLOW(bhs)
return bhs
def is_bridgehead_failed(self, dsa, detect_failed):
"""Determine whether a given DC is known to be in a failed state
:param dsa: the bridgehead to test
:param detect_failed: True to really check, False to assume no failure
:return: True if and only if the DC should be considered failed
Here we DEPART from the pseudo code spec which appears to be
wrong. It says, in full:
/***** BridgeheadDCFailed *****/
/* Determine whether a given DC is known to be in a failed state.
* IN: objectGUID - objectGUID of the DC's nTDSDSA object.
* IN: detectFailedDCs - TRUE if and only if failed DC detection is
* enabled.
* RETURNS: TRUE if and only if the DC should be considered to be in a
* failed state.
*/
BridgeheadDCFailed(IN GUID objectGUID, IN bool detectFailedDCs) : bool
{
IF bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set in
the options attribute of the site settings object for the local
DC's site
RETURN FALSE
ELSEIF a tuple z exists in the kCCFailedLinks or
kCCFailedConnections variables such that z.UUIDDsa =
objectGUID, z.FailureCount > 1, and the current time -
z.TimeFirstFailure > 2 hours
RETURN TRUE
ELSE
RETURN detectFailedDCs
ENDIF
}
where you will see detectFailedDCs is not behaving as
advertised -- it is acting as a default return code in the
event that a failure is not detected, not a switch turning
detection on or off. Elsewhere the documentation seems to
concur with the comment rather than the code.
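In this implementation detect_failed therefore behaves as a true
switch: when it is False we return False immediately and never
consult the failure tables.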
"""
if not detect_failed:
return False
# NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
# When DETECT_STALE_DISABLED, we can never know if
# it's in a failed state
if self.my_site.site_options & 0x00000008:
return False
return self.is_stale_link_connection(dsa)
def create_connection(self, part, rbh, rsite, transport,
lbh, lsite, link_opt, link_sched,
partial_ok, detect_failed):
"""Create an nTDSConnection object as specified if it doesn't exist.
Part of MS-ADTS 6.2.2.3.4.5
:param part: crossRef object for the NC to replicate.
:param rbh: nTDSDSA object for DC to act as the
IDL_DRSGetNCChanges server (which is in a site other
than the local DC's site).
:param rsite: site of the rbh
:param transport: interSiteTransport object for the transport
to use for replication traffic.
:param lbh: nTDSDSA object for DC to act as the
IDL_DRSGetNCChanges client (which is in the local DC's site).
:param lsite: site of the lbh
:param link_opt: Replication parameters (aggregated siteLink options,
etc.)
:param link_sched: Schedule specifying the times at which
to begin replicating.
:param partial_ok: True if bridgehead DCs containing partial
replicas of the NC are acceptable.
:param detect_failed: True to detect failed DCs and route
replication traffic around them, FALSE to assume no DC
has failed.
"""
rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
partial_ok, False)
rbh_table = dict((x.dsa_dnstr, x) for x in rbhs_all)
debug.DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
[x.dsa_dnstr for x in rbhs_all]))
# MS-TECH says to compute rbhs_avail but then doesn't use it
# rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
# partial_ok, detect_failed)
lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
partial_ok, False)
if lbh.is_ro():
lbhs_all.append(lbh)
debug.DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
[x.dsa_dnstr for x in lbhs_all]))
# MS-TECH says to compute lbhs_avail but then doesn't use it
# lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
# partial_ok, detect_failed)
# FOR each nTDSConnection object cn such that the parent of cn is
# a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
for ldsa in lbhs_all:
for cn in ldsa.connect_table.values():
rdsa = rbh_table.get(cn.from_dnstr)
if rdsa is None:
continue
debug.DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
# IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
# NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
# cn!transportType references t
if ((cn.is_generated() and
not cn.is_rodc_topology() and
cn.transport_guid == transport.guid)):
# IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
# cn!options and cn!schedule != sch
# Perform an originating update to set cn!schedule to
# sched
if ((not cn.is_user_owned_schedule() and
not cn.is_equivalent_schedule(link_sched))):
cn.schedule = link_sched
cn.set_modified(True)
# IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
# NTDSCONN_OPT_USE_NOTIFY are set in cn
if cn.is_override_notify_default() and \
cn.is_use_notify():
# IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
# ri.Options
# Perform an originating update to clear bits
# NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
# NTDSCONN_OPT_USE_NOTIFY in cn!options
if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
cn.options &= \
~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
dsdb.NTDSCONN_OPT_USE_NOTIFY)
cn.set_modified(True)
# ELSE
else:
# IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
# ri.Options
# Perform an originating update to set bits
# NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
# NTDSCONN_OPT_USE_NOTIFY in cn!options
if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
cn.options |= \
(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
dsdb.NTDSCONN_OPT_USE_NOTIFY)
cn.set_modified(True)
# IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
if cn.is_twoway_sync():
# IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
# ri.Options
# Perform an originating update to clear bit
# NTDSCONN_OPT_TWOWAY_SYNC in cn!options
if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
cn.set_modified(True)
# ELSE
else:
# IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
# ri.Options
# Perform an originating update to set bit
# NTDSCONN_OPT_TWOWAY_SYNC in cn!options
if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
cn.set_modified(True)
# IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
# in cn!options
if cn.is_intersite_compression_disabled():
# IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
# in ri.Options
# Perform an originating update to clear bit
# NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
# cn!options
if ((link_opt &
dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
cn.options &= \
~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
cn.set_modified(True)
# ELSE
else:
# IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
# ri.Options
# Perform an originating update to set bit
# NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
# cn!options
if ((link_opt &
dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
cn.options |= \
dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
cn.set_modified(True)
# Display any modified connection
if self.readonly or ldsa.is_ro():
if cn.to_be_modified:
logger.info("TO BE MODIFIED:\n%s" % cn)
ldsa.commit_connections(self.samdb, ro=True)
else:
ldsa.commit_connections(self.samdb)
# ENDFOR
valid_connections = 0
# FOR each nTDSConnection object cn such that cn!parent is
# a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
for ldsa in lbhs_all:
for cn in ldsa.connect_table.values():
rdsa = rbh_table.get(cn.from_dnstr)
if rdsa is None:
continue
debug.DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)
# IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
# cn!transportType references t) and
# NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
if (((not cn.is_generated() or
cn.transport_guid == transport.guid) and
not cn.is_rodc_topology())):
# LET rguid be the objectGUID of the nTDSDSA object
# referenced by cn!fromServer
# LET lguid be (cn!parent)!objectGUID
# IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
# BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
# Increment cValidConnections by 1
if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
not self.is_bridgehead_failed(ldsa, detect_failed))):
valid_connections += 1
# IF keepConnections does not contain cn!objectGUID
# APPEND cn!objectGUID to keepConnections
self.kept_connections.add(cn)
# ENDFOR
debug.DEBUG_RED("valid connections %d" % valid_connections)
DEBUG("kept_connections:\n%s" % (self.kept_connections,))
# IF cValidConnections = 0
if valid_connections == 0:
# LET opt be NTDSCONN_OPT_IS_GENERATED
opt = dsdb.NTDSCONN_OPT_IS_GENERATED
# IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
# SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
# NTDSCONN_OPT_USE_NOTIFY in opt
if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
dsdb.NTDSCONN_OPT_USE_NOTIFY)
# IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
# SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
# IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
# ri.Options
# SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
if ((link_opt &
dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
# Perform an originating update to create a new nTDSConnection
# object cn that is a child of lbh, cn!enabledConnection = TRUE,
# cn!options = opt, cn!transportType is a reference to t,
# cn!fromServer is a reference to rbh, and cn!schedule = sch
DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr)
system_flags = (dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME |
dsdb.SYSTEM_FLAG_CONFIG_ALLOW_MOVE)
cn = lbh.new_connection(opt, system_flags, transport,
rbh.dsa_dnstr, link_sched)
# Display any added connection
if self.readonly or lbh.is_ro():
if cn.to_be_added:
logger.info("TO BE ADDED:\n%s" % cn)
lbh.commit_connections(self.samdb, ro=True)
else:
lbh.commit_connections(self.samdb)
# APPEND cn!objectGUID to keepConnections
self.kept_connections.add(cn)
def add_transports(self, vertex, local_vertex, graph, detect_failed):
"""Build a Vertex's transport lists
Each vertex has accept_red_red and accept_black lists that
list what transports they accept under various conditions. The
only transports that are ever accepted are IP and a dummy extra
transport called "EDGE_TYPE_ALL".
Part of MS-ADTS 6.2.2.3.4.3 -- ColorVertices
:param vertex: the remote vertex we are thinking about
:param local_vertex: the vertex relating to the local site.
:param graph: the intersite graph
:param detect_failed: whether to detect failed links
:return: True if some bridgeheads were not found
"""
# The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
# here, but using vertex seems to make more sense. That is,
# the docs want this:
#
# bh = self.get_bridgehead(local_vertex.site, vertex.part, transport,
# local_vertex.is_black(), detect_failed)
#
# TODO WHY?????
vertex.accept_red_red = []
vertex.accept_black = []
found_failed = False
if vertex in graph.connected_vertices:
t_guid = str(self.ip_transport.guid)
bh = self.get_bridgehead(vertex.site, vertex.part,
self.ip_transport,
vertex.is_black(), detect_failed)
if bh is None:
if vertex.site.is_rodc_site():
vertex.accept_red_red.append(t_guid)
else:
found_failed = True
else:
vertex.accept_red_red.append(t_guid)
vertex.accept_black.append(t_guid)
# Add additional transport to ensure another run of Dijkstra
vertex.accept_red_red.append("EDGE_TYPE_ALL")
vertex.accept_black.append("EDGE_TYPE_ALL")
return found_failed
def create_connections(self, graph, part, detect_failed):
"""Create intersite NTDSConnections as needed by a partition
Construct an NC replica graph for the NC identified by
the given crossRef, then create any additional nTDSConnection
objects required.
:param graph: site graph.
:param part: crossRef object for NC.
:param detect_failed: True to detect failed DCs and route
replication traffic around them, False to assume no DC
has failed.
Modifies self.kept_connections by adding any connections
deemed to be "in use".
:return: (all_connected, found_failed_dc)
(all_connected) True if the resulting NC replica graph
connects all sites that need to be connected.
(found_failed_dc) True if one or more failed DCs were
detected.
"""
all_connected = True
found_failed = False
DEBUG_FN("create_connections(): enter\n"
"\tpartdn=%s\n\tdetect_failed=%s" %
(part.nc_dnstr, detect_failed))
# XXX - This is a highly abbreviated function from the MS-TECH
# ref. It creates connections between bridgeheads to all
# sites that have appropriate replicas. Thus we are not
# creating a minimum cost spanning tree but instead
# producing a fully connected tree. This should produce
# a full (albeit not optimal cost) replication topology.
my_vertex = Vertex(self.my_site, part)
my_vertex.color_vertex()
for v in graph.vertices:
v.color_vertex()
if self.add_transports(v, my_vertex, graph, detect_failed):
found_failed = True
# No NC replicas for this NC in the site of the local DC,
# so no nTDSConnection objects need be created
if my_vertex.is_white():
return all_connected, found_failed
edge_list, n_components = get_spanning_tree_edges(graph,
self.my_site,
label=part.partstr)
DEBUG_FN("%s Number of components: %d" %
(part.nc_dnstr, n_components))
if n_components > 1:
all_connected = False
# LET partialReplicaOkay be TRUE if and only if
# localSiteVertex.Color = COLOR.BLACK
partial_ok = my_vertex.is_black()
# Utilize the IP transport only for now
transport = self.ip_transport
DEBUG("edge_list %s" % edge_list)
for e in edge_list:
# XXX more accurate comparison?
if e.directed and e.vertices[0].site is self.my_site:
continue
if e.vertices[0].site is self.my_site:
rsite = e.vertices[1].site
else:
rsite = e.vertices[0].site
# We don't make connections to our own site as that
# is intrasite topology generator's job
if rsite is self.my_site:
DEBUG("rsite is my_site")
continue
# Determine bridgehead server in remote site
rbh = self.get_bridgehead(rsite, part, transport,
partial_ok, detect_failed)
if rbh is None:
continue
# RODC acts as a BH for itself
# IF AmIRODC() then
# LET lbh be the nTDSDSA object of the local DC
# ELSE
# LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
# cr, t, partialReplicaOkay, detectFailedDCs)
if self.my_dsa.is_ro():
lsite = self.my_site
lbh = self.my_dsa
else:
lsite = self.my_site
lbh = self.get_bridgehead(lsite, part, transport,
partial_ok, detect_failed)
# TODO
if lbh is None:
debug.DEBUG_RED("DISASTER! lbh is None")
return False, True
DEBUG_FN("lsite: %s\nrsite: %s" % (lsite, rsite))
DEBUG_FN("vertices %s" % (e.vertices,))
debug.DEBUG_BLUE("bridgeheads\n%s\n%s\n%s" % (lbh, rbh, "-" * 70))
sitelink = e.site_link
if sitelink is None:
link_opt = 0x0
link_sched = None
else:
link_opt = sitelink.options
link_sched = sitelink.schedule
self.create_connection(part, rbh, rsite, transport,
lbh, lsite, link_opt, link_sched,
partial_ok, detect_failed)
return all_connected, found_failed
def create_intersite_connections(self):
"""Create NTDSConnections as necessary for all partitions.
Computes an NC replica graph for each NC replica that "should be
present" on the local DC or "is present" on any DC in the same site
as the local DC. For each edge directed to an NC replica on such a
DC from an NC replica on a DC in another site, the KCC creates an
nTDSConnection object to imply that edge if one does not already
exist.
Modifies self.kept_connections - A set of nTDSConnection
objects for edges that are directed
to the local DC's site in one or more NC replica graphs.
:return: True if spanning trees were created for all NC replica
graphs, otherwise False.
"""
all_connected = True
self.kept_connections = set()
# LET crossRefList be the set containing each object o of class
# crossRef such that o is a child of the CN=Partitions child of the
# config NC
# FOR each crossRef object cr in crossRefList
# IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
# is clear in cr!systemFlags, skip cr.
# LET g be the GRAPH return of SetupGraph()
for part in self.part_table.values():
if not part.is_enabled():
continue
if part.is_foreign():
continue
graph = self.setup_graph(part)
# Create nTDSConnection objects, routing replication traffic
# around "failed" DCs.
found_failed = False
connected, found_failed = self.create_connections(graph,
part, True)
DEBUG("with detect_failed: connected %s Found failed %s" %
(connected, found_failed))
if not connected:
all_connected = False
if found_failed:
# One or more failed DCs preclude use of the ideal NC
# replica graph. Add connections for the ideal graph.
self.create_connections(graph, part, False)
return all_connected
def intersite(self, ping):
"""Generate the inter-site KCC replica graph and nTDSConnections
As per MS-ADTS 6.2.2.3.
If self.readonly is False, the connections are added to self.samdb.
Produces self.kept_connections which is a set of NTDS
Connections that should be kept during subsequent pruning
process.
After this has run, all sites should be connected in a minimum
spanning tree.
:param ping: An oracle function of remote site availability
:return (True or False): (True) if the produced NC replica
graph connects all sites that need to be connected
"""
# Retrieve my DSA
mydsa = self.my_dsa
mysite = self.my_site
all_connected = True
DEBUG_FN("intersite(): enter")
# Determine who is the ISTG
if self.readonly:
mysite.select_istg(self.samdb, mydsa, ro=True)
else:
mysite.select_istg(self.samdb, mydsa, ro=False)
# Test whether local site has topology disabled
if mysite.is_intersite_topology_disabled():
DEBUG_FN("intersite(): exit disabled all_connected=%d" %
all_connected)
return all_connected
if not mydsa.is_istg():
DEBUG_FN("intersite(): exit not istg all_connected=%d" %
all_connected)
return all_connected
self.merge_failed_links(ping)
# For each NC with an NC replica that "should be present" on the
# local DC or "is present" on any DC in the same site as the
# local DC, the KCC constructs a site graph--a precursor to an NC
# replica graph. The site connectivity for a site graph is defined
# by objects of class interSiteTransport, siteLink, and
# siteLinkBridge in the config NC.
all_connected = self.create_intersite_connections()
DEBUG_FN("intersite(): exit all_connected=%d" % all_connected)
return all_connected
# This function currently performs no actions, because we cannot
# perform modifications in this way on the RODC.
def update_rodc_connection(self, ro=True):
"""Updates the RODC NTFRS connection object.
If the local DSA is not an RODC, this does nothing.
"""
if not self.my_dsa.is_ro():
return
# Given an nTDSConnection object cn1, such that cn1.options contains
# NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
# does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
# that the following is true:
#
# cn1.fromServer = cn2.fromServer
# cn1.schedule = cn2.schedule
#
# If no such cn2 can be found, cn1 is not modified.
# If no such cn1 can be found, nothing is modified by this task.
all_connections = self.my_dsa.connect_table.values()
ro_connections = [x for x in all_connections if x.is_rodc_topology()]
rw_connections = [x for x in all_connections
if x not in ro_connections]
# XXX here we are dealing with multiple RODC_TOPO connections,
# if they exist. It is not clear whether the spec means that
# or if it ever arises.
if rw_connections and ro_connections:
for con in ro_connections:
cn2 = rw_connections[0]
con.from_dnstr = cn2.from_dnstr
con.schedule = cn2.schedule
con.to_be_modified = True
self.my_dsa.commit_connections(self.samdb, ro=ro)
def intrasite_max_node_edges(self, node_count):
"""Find the maximum number of edges directed to an intrasite node
The KCC does not create more than 50 edges directed to a
single DC. To optimize replication, we compute that each node
should have n+2 total edges directed to it such that (n) is
the smallest non-negative integer satisfying
(node_count <= 2*(n*n) + 6*n + 7)
(If the number of edges is m (i.e. n + 2), that is the same as
2 * m*m - 2 * m + 3). We think in terms of n because that is
the number of extra connections over the double directed ring
that exists by default.
edges n nodecount
2 0 7
3 1 15
4 2 27
5 3 43
...
50 48 4903
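As an informal check of the formula: node_count=7 gives n=0 and 2
edges, node_count=8 gives n=1 and 3 edges, node_count=4903 gives n=48
and 50 edges, and anything larger is capped at 50.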
:param node_count: total number of nodes in the replica graph
The intention is that there should be no more than 3 hops
between any two DSAs at a site. With up to 7 nodes the 2 edges
of the ring are enough; any configuration of extra edges with
8 nodes will be enough. It is less clear that the 3 hop
guarantee holds at e.g. 15 nodes in degenerate cases, but
those are quite unlikely given the extra edges are randomly
arranged.
:return: The desired maximum number of connections
"""
n = 0
while True:
if node_count <= (2 * (n * n) + (6 * n) + 7):
break
n = n + 1
n = n + 2
if n < 50:
return n
return 50
def construct_intrasite_graph(self, site_local, dc_local,
nc_x, gc_only, detect_stale):
"""Create an intrasite graph using given parameters
This might be called a number of times per site with different
parameters.
Based on [MS-ADTS] 6.2.2.2
:param site_local: site for which we are working
:param dc_local: local DC that potentially needs a replica
:param nc_x: naming context (x) that we are testing if it
"should be present" on the local DC
:param gc_only: Boolean - only consider global catalog servers
:param detect_stale: Boolean - check whether links seems down
:return: None
"""
# We're using the MS notation names here to allow
# correlation back to the published algorithm.
#
# nc_x - naming context (x) that we are testing if it
# "should be present" on the local DC
# f_of_x - replica (f) found on a DC (s) for NC (x)
# dc_s - DC where f_of_x replica was found
# dc_local - local DC that potentially needs a replica
# (f_of_x)
# r_list - replica list R
# p_of_x - replica (p) is partial and found on a DC (s)
# for NC (x)
# l_of_x - replica (l) is the local replica for NC (x)
# that should appear on the local DC
# r_len = is length of replica list |R|
#
# If the DSA doesn't need a replica for this
# partition (NC x) then continue
needed, ro, partial = nc_x.should_be_present(dc_local)
debug.DEBUG_YELLOW("construct_intrasite_graph(): enter" +
"\n\tgc_only=%d" % gc_only +
"\n\tdetect_stale=%d" % detect_stale +
"\n\tneeded=%s" % needed +
"\n\tro=%s" % ro +
"\n\tpartial=%s" % partial +
"\n%s" % nc_x)
if not needed:
debug.DEBUG_RED("%s lacks 'should be present' status, "
"aborting construct_intrasite_graph!" %
nc_x.nc_dnstr)
return
# Create a NCReplica that matches what the local replica
# should say. We'll use this below in our r_list
l_of_x = NCReplica(dc_local, nc_x.nc_dnstr)
l_of_x.identify_by_basedn(self.samdb)
l_of_x.rep_partial = partial
l_of_x.rep_ro = ro
# Add this replica that "should be present" to the
# needed replica table for this DSA
dc_local.add_needed_replica(l_of_x)
# Replica list
#
# Let R be a sequence containing each writable replica f of x
# such that f "is present" on a DC s satisfying the following
# criteria:
#
# * s is a writable DC other than the local DC.
#
# * s is in the same site as the local DC.
#
# * If x is a read-only full replica and x is a domain NC,
# then the DC's functional level is at least
# DS_BEHAVIOR_WIN2008.
#
# * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
# in the options attribute of the site settings object for
# the local DC's site, or no tuple z exists in the
# kCCFailedLinks or kCCFailedConnections variables such
# that z.UUIDDsa is the objectGUID of the nTDSDSA object
# for s, z.FailureCount > 0, and the current time -
# z.TimeFirstFailure > 2 hours.
r_list = []
# We'll loop thru all the DSAs looking for
# writeable NC replicas that match the naming
# context dn for (nc_x)
#
for dc_s in self.my_site.dsa_table.values():
# If this partition (nc_x) doesn't appear as a
# replica (f_of_x) on (dc_s) then continue
if nc_x.nc_dnstr not in dc_s.current_rep_table:
continue
# Pull out the NCReplica (f) of (x) with the dn
# that matches NC (x) we are examining.
f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
# Replica (f) of NC (x) must be writable
if f_of_x.is_ro():
continue
# Replica (f) of NC (x) must satisfy the
# "is present" criteria for DC (s) that
# it was found on
if not f_of_x.is_present():
continue
# DC (s) must be a writable DSA other than
# my local DC. In other words we'd only replicate
# from other writable DC
if dc_s.is_ro() or dc_s is dc_local:
continue
# Certain replica graphs are produced only
# for global catalogs, so test against
# method input parameter
if gc_only and not dc_s.is_gc():
continue
# DC (s) must be in the same site as the local DC
# as this is the intra-site algorithm. This is
# handled by virtue of placing DSAs in per
# site objects (see enclosing for() loop)
# If NC (x) is intended to be read-only full replica
# for a domain NC on the target DC then the source
# DC should have functional level at minimum WIN2008
#
# Effectively we're saying that in order to replicate
# to a targeted RODC (which was introduced in Windows 2008)
# then we have to replicate from a DC that is also minimally
# at that level.
#
# You can also see this requirement in the MS special
# considerations for RODC which state that to deploy
# an RODC, at least one writable domain controller in
# the domain must be running Windows Server 2008
if ro and not partial and nc_x.nc_type == NCType.domain:
if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
continue
# If we haven't been told to turn off stale connection
# detection and this dsa has a stale connection then
# continue
if detect_stale and self.is_stale_link_connection(dc_s):
continue
# Replica meets criteria. Add it to table indexed
# by the GUID of the DC that it appears on
r_list.append(f_of_x)
# If a partial (not full) replica of NC (x) "should be present"
# on the local DC, append to R each partial replica (p of x)
# such that p "is present" on a DC satisfying the same
# criteria defined above for full replica DCs.
#
# XXX This loop and the previous one differ only in whether
# the replica is partial or not. here we only accept partial
# (because we're partial); before we only accepted full. Order
# doesn't matter (the list is sorted a few lines down) so these
# loops could easily be merged. Or this could be a helper
# function.
if partial:
# Now we loop thru all the DSAs looking for
# partial NC replicas that match the naming
# context dn for (NC x)
for dc_s in self.my_site.dsa_table.values():
# If this partition NC (x) doesn't appear as a
# replica (p) of NC (x) on the dsa DC (s) then
# continue
if nc_x.nc_dnstr not in dc_s.current_rep_table:
continue
# Pull out the NCReplica with the dn that
# matches NC (x) we are examining.
p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]
# Replica (p) of NC (x) must be partial
if not p_of_x.is_partial():
continue
# Replica (p) of NC (x) must satisfy the
# "is present" criteria for DC (s) that
# it was found on
if not p_of_x.is_present():
continue
# DC (s) must be a writable DSA other than
# my DSA. In other words we'd only replicate
# from other writable DSA
if dc_s.is_ro() or dc_s is dc_local:
continue
# Certain replica graphs are produced only
# for global catalogs, so test against
# method input parameter
if gc_only and not dc_s.is_gc():
continue
# If we haven't been told to turn off stale connection
# detection and this dsa has a stale connection then
# continue
if detect_stale and self.is_stale_link_connection(dc_s):
continue
# Replica meets criteria. Add it to table indexed
# by the GUID of the DSA that it appears on
r_list.append(p_of_x)
# Append to R the NC replica that "should be present"
# on the local DC
r_list.append(l_of_x)
r_list.sort(key=lambda rep: ndr_pack(rep.rep_dsa_guid))
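# Sorting by the NDR-packed GUID presumably gives every DSA in the site
# the same deterministic ordering of R, so they all build the same ring.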
r_len = len(r_list)
max_node_edges = self.intrasite_max_node_edges(r_len)
# Add a node for each r_list element to the replica graph
graph_list = []
for rep in r_list:
node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
graph_list.append(node)
# For each r(i) from (0 <= i < |R|-1)
i = 0
while i < (r_len - 1):
# Add an edge from r(i) to r(i+1) if r(i) is a full
# replica or r(i+1) is a partial replica
if not r_list[i].is_partial() or r_list[i + 1].is_partial():
graph_list[i + 1].add_edge_from(r_list[i].rep_dsa_dnstr)
# Add an edge from r(i+1) to r(i) if r(i+1) is a full
# replica or ri is a partial replica.
if not r_list[i + 1].is_partial() or r_list[i].is_partial():
graph_list[i].add_edge_from(r_list[i + 1].rep_dsa_dnstr)
i = i + 1
# Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
# or r0 is a partial replica.
if not r_list[r_len - 1].is_partial() or r_list[0].is_partial():
graph_list[0].add_edge_from(r_list[r_len - 1].rep_dsa_dnstr)
# Add an edge from r0 to r|R|-1 if r0 is a full replica or
# r|R|-1 is a partial replica.
if not r_list[0].is_partial() or r_list[r_len - 1].is_partial():
graph_list[r_len - 1].add_edge_from(r_list[0].rep_dsa_dnstr)
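# Informally, for R = [r0, r1, r2, r3] of full replicas the loop above
# plus these two wrap-around edges give the directed double ring
# r0 <-> r1 <-> r2 <-> r3 <-> r0.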
DEBUG("r_list is length %s" % len(r_list))
DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
for x in r_list))
do_dot_files = self.dot_file_dir is not None and self.debug
if self.verify or do_dot_files:
dot_edges = []
dot_vertices = set()
for v1 in graph_list:
dot_vertices.add(v1.dsa_dnstr)
for v2 in v1.edge_from:
dot_edges.append((v2, v1.dsa_dnstr))
dot_vertices.add(v2)
verify_properties = ('connected',)
verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
label='%s__%s__%s' % (site_local.site_dnstr,
nctype_lut[nc_x.nc_type],
nc_x.nc_dnstr),
properties=verify_properties, debug=DEBUG,
verify=self.verify,
dot_file_dir=self.dot_file_dir,
directed=True)
rw_dot_vertices = set(x for x in dot_vertices
if not self.get_dsa(x).is_ro())
rw_dot_edges = [(a, b) for a, b in dot_edges if
a in rw_dot_vertices and b in rw_dot_vertices]
rw_verify_properties = ('connected',
'directed_double_ring_or_small')
verify_and_dot('intrasite_rw_pre_ntdscon', rw_dot_edges,
rw_dot_vertices,
label='%s__%s__%s' % (site_local.site_dnstr,
nctype_lut[nc_x.nc_type],
nc_x.nc_dnstr),
properties=rw_verify_properties, debug=DEBUG,
verify=self.verify,
dot_file_dir=self.dot_file_dir,
directed=True)
# For each existing nTDSConnection object implying an edge
# from rj of R to ri such that j != i, an edge from rj to ri
# is not already in the graph, and the total edges directed
# to ri is less than n+2, the KCC adds that edge to the graph.
for vertex in graph_list:
dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
for connect in dsa.connect_table.values():
remote = connect.from_dnstr
if remote in self.my_site.dsa_table:
vertex.add_edge_from(remote)
DEBUG('reps are: %s' % ' '.join(x.rep_dsa_dnstr for x in r_list))
DEBUG('dsas are: %s' % ' '.join(x.dsa_dnstr for x in graph_list))
for tnode in graph_list:
# To optimize replication latency in sites with many NC
# replicas, the KCC adds new edges directed to ri to bring
# the total edges to n+2, where the NC replica rk of R
# from which the edge is directed is chosen at random such
# that k != i and an edge from rk to ri is not already in
# the graph.
#
# Note that the KCC tech ref does not give a number for
# the definition of "sites with many NC replicas". At a
# bare minimum to satisfy n+2 edges directed at a node we
# have to have at least three replicas in |R| (i.e. if n
# is zero then replicas from at least two other graph
# nodes may direct edges to us).
if r_len >= 3 and not tnode.has_sufficient_edges():
candidates = [x for x in graph_list if
(x is not tnode and
x.dsa_dnstr not in tnode.edge_from)]
debug.DEBUG_BLUE("looking for random link for %s. r_len %d, "
"graph len %d candidates %d"
% (tnode.dsa_dnstr, r_len, len(graph_list),
len(candidates)))
DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])
while candidates and not tnode.has_sufficient_edges():
other = random.choice(candidates)
DEBUG("trying to add candidate %s" % other.dsa_dnstr)
if not tnode.add_edge_from(other.dsa_dnstr):
debug.DEBUG_RED("could not add %s" % other.dsa_dnstr)
candidates.remove(other)
else:
DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" %
(tnode.dsa_dnstr, r_len, len(tnode.edge_from),
tnode.max_edges))
# Print the graph node in debug mode
DEBUG_FN("%s" % tnode)
# For each edge directed to the local DC, ensure a nTDSConnection
# points to us that satisfies the KCC criteria
if tnode.dsa_dnstr == dc_local.dsa_dnstr:
tnode.add_connections_from_edges(dc_local, self.ip_transport)
if self.verify or do_dot_files:
dot_edges = []
dot_vertices = set()
for v1 in graph_list:
dot_vertices.add(v1.dsa_dnstr)
for v2 in v1.edge_from:
dot_edges.append((v2, v1.dsa_dnstr))
dot_vertices.add(v2)
verify_properties = ('connected',)
verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
label='%s__%s__%s' % (site_local.site_dnstr,
nctype_lut[nc_x.nc_type],
nc_x.nc_dnstr),
properties=verify_properties, debug=DEBUG,
verify=self.verify,
dot_file_dir=self.dot_file_dir,
directed=True)
rw_dot_vertices = set(x for x in dot_vertices
if not self.get_dsa(x).is_ro())
rw_dot_edges = [(a, b) for a, b in dot_edges if
a in rw_dot_vertices and b in rw_dot_vertices]
rw_verify_properties = ('connected',
'directed_double_ring_or_small')
verify_and_dot('intrasite_rw_post_ntdscon', rw_dot_edges,
rw_dot_vertices,
label='%s__%s__%s' % (site_local.site_dnstr,
nctype_lut[nc_x.nc_type],
nc_x.nc_dnstr),
properties=rw_verify_properties, debug=DEBUG,
verify=self.verify,
dot_file_dir=self.dot_file_dir,
directed=True)
def intrasite(self):
"""Generate the intrasite KCC connections
As per MS-ADTS 6.2.2.2.
If self.readonly is False, the connections are added to self.samdb.
After this call, all DCs in each site with more than 3 DCs
should be connected in a bidirectional ring. If a site has 2
DCs, they will be bidirectionally connected. Sites with many DCs
may have arbitrary extra connections.
:return: None
"""
mydsa = self.my_dsa
DEBUG_FN("intrasite(): enter")
# Test whether local site has topology disabled
mysite = self.my_site
if mysite.is_intrasite_topology_disabled():
return
detect_stale = (not mysite.is_detect_stale_disabled())
for connect in mydsa.connect_table.values():
if connect.to_be_added:
debug.DEBUG_CYAN("TO BE ADDED:\n%s" % connect)
# Loop thru all the partitions, with gc_only False
for partdn, part in self.part_table.items():
self.construct_intrasite_graph(mysite, mydsa, part, False,
detect_stale)
for connect in mydsa.connect_table.values():
if connect.to_be_added:
debug.DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
# If the DC is a GC server, the KCC constructs an additional NC
# replica graph (and creates nTDSConnection objects) for the
# config NC as above, except that only NC replicas that "are present"
# on GC servers are added to R.
for connect in mydsa.connect_table.values():
if connect.to_be_added:
debug.DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)
# Do it again, with gc_only True
for partdn, part in self.part_table.items():
if part.is_config():
self.construct_intrasite_graph(mysite, mydsa, part, True,
detect_stale)
# The DC repeats the NC replica graph computation and nTDSConnection
# creation for each of the NC replica graphs, this time assuming
# that no DC has failed. It does so by re-executing the steps as
# if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
# set in the options attribute of the site settings object for
# the local DC's site. (i.e. we set the "detect_stale" flag to False)
for connect in mydsa.connect_table.values():
if connect.to_be_added:
debug.DEBUG_BLUE("TO BE ADDED:\n%s" % connect)
# Loop thru all the partitions.
for partdn, part in self.part_table.items():
self.construct_intrasite_graph(mysite, mydsa, part, False,
False) # don't detect stale
# If the DC is a GC server, the KCC constructs an additional NC
# replica graph (and creates nTDSConnection objects) for the
# config NC as above, except that only NC replicas that "are present"
# on GC servers are added to R.
for connect in mydsa.connect_table.values():
if connect.to_be_added:
debug.DEBUG_RED("TO BE ADDED:\n%s" % connect)
for partdn, part in self.part_table.items():
if part.is_config():
self.construct_intrasite_graph(mysite, mydsa, part, True,
False) # don't detect stale
self._commit_changes(mydsa)
def list_dsas(self):
"""Compile a comprehensive list of DSA DNs
These are all the DSAs on all the sites that KCC would be
dealing with.
This method is not idempotent and may not work correctly in
sequence with KCC.run().
:return: a list of DSA DN strings.
"""
self.load_my_site()
self.load_my_dsa()
self.load_all_sites()
self.load_all_partitions()
self.load_ip_transport()
self.load_all_sitelinks()
dsas = []
for site in self.site_table.values():
dsas.extend([dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
for dsa in site.dsa_table.values()])
return dsas
def load_samdb(self, dburl, lp, creds, force=False):
"""Load the database using an url, loadparm, and credentials
If force is False, the samdb won't be reloaded if it already
exists.
:param dburl: a database url.
:param lp: a loadparm object.
:param creds: a Credentials object.
:param force: a boolean indicating whether to overwrite.
"""
if force or self.samdb is None:
try:
self.samdb = SamDB(url=dburl,
session_info=system_session(),
credentials=creds, lp=lp)
except ldb.LdbError as e1:
(num, msg) = e1.args
raise KCCError("Unable to open sam database %s : %s" %
(dburl, msg))
def plot_all_connections(self, basename, verify_properties=()):
"""Helper function to plot and verify NTDSConnections
:param basename: an identifying string to use in filenames and logs.
:param verify_properties: properties to verify (default empty)
"""
verify = verify_properties and self.verify
if not verify and self.dot_file_dir is None:
return
dot_edges = []
dot_vertices = []
edge_colours = []
vertex_colours = []
for dsa in self.dsa_by_dnstr.values():
dot_vertices.append(dsa.dsa_dnstr)
if dsa.is_ro():
vertex_colours.append('#cc0000')
else:
vertex_colours.append('#0000cc')
for con in dsa.connect_table.values():
if con.is_rodc_topology():
edge_colours.append('red')
else:
edge_colours.append('blue')
dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))
verify_and_dot(basename, dot_edges, vertices=dot_vertices,
label=self.my_dsa_dnstr,
properties=verify_properties, debug=DEBUG,
verify=verify, dot_file_dir=self.dot_file_dir,
directed=True, edge_colors=edge_colours,
vertex_colors=vertex_colours)
def run(self, dburl, lp, creds, forced_local_dsa=None,
forget_local_links=False, forget_intersite_links=False,
attempt_live_connections=False):
"""Perform a KCC run, possibly updating repsFrom topology
:param dburl: url of the database to work with.
:param lp: a loadparm object.
:param creds: a Credentials object.
:param forced_local_dsa: pretend to be on the DSA with this dn_str
:param forget_local_links: calculate as if no connections existed
(boolean, default False)
:param forget_intersite_links: calculate with only intrasite connection
(boolean, default False)
:param attempt_live_connections: attempt to connect to remote DSAs to
determine link availability (boolean, default False)
:return: 1 on error, 0 otherwise
"""
if self.samdb is None:
DEBUG_FN("samdb is None; let's load it from %s" % (dburl,))
self.load_samdb(dburl, lp, creds, force=False)
if forced_local_dsa:
self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %
forced_local_dsa)
try:
# Setup
self.load_my_site()
self.load_my_dsa()
self.load_all_sites()
self.load_all_partitions()
self.load_ip_transport()
self.load_all_sitelinks()
if self.verify or self.dot_file_dir is not None:
guid_to_dnstr = {}
for site in self.site_table.values():
guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
for dnstr, dsa
in site.dsa_table.items())
self.plot_all_connections('dsa_initial')
dot_edges = []
current_reps, needed_reps = self.my_dsa.get_rep_tables()
for dnstr, c_rep in current_reps.items():
DEBUG("c_rep %s" % c_rep)
dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))
verify_and_dot('dsa_repsFrom_initial', dot_edges,
directed=True, label=self.my_dsa_dnstr,
properties=(), debug=DEBUG, verify=self.verify,
dot_file_dir=self.dot_file_dir)
dot_edges = []
for site in self.site_table.values():
for dsa in site.dsa_table.values():
current_reps, needed_reps = dsa.get_rep_tables()
for dn_str, rep in current_reps.items():
for reps_from in rep.rep_repsFrom:
DEBUG("rep %s" % rep)
dsa_guid = str(reps_from.source_dsa_obj_guid)
dsa_dn = guid_to_dnstr[dsa_guid]
dot_edges.append((dsa.dsa_dnstr, dsa_dn))
verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
directed=True, label=self.my_dsa_dnstr,
properties=(), debug=DEBUG, verify=self.verify,
dot_file_dir=self.dot_file_dir)
dot_edges = []
dot_colours = []
for link in self.sitelink_table.values():
from hashlib import md5
tmp_str = link.dnstr.encode('utf8')
colour = '#' + md5(tmp_str).hexdigest()[:6]
for a, b in itertools.combinations(link.site_list, 2):
dot_edges.append((a[1], b[1]))
dot_colours.append(colour)
properties = ('connected',)
verify_and_dot('dsa_sitelink_initial', dot_edges,
directed=False,
label=self.my_dsa_dnstr, properties=properties,
debug=DEBUG, verify=self.verify,
dot_file_dir=self.dot_file_dir,
edge_colors=dot_colours)
if forget_local_links:
for dsa in self.my_site.dsa_table.values():
dsa.connect_table = dict((k, v) for k, v in
dsa.connect_table.items()
if v.is_rodc_topology() or
(v.from_dnstr not in
self.my_site.dsa_table))
self.plot_all_connections('dsa_forgotten_local')
if forget_intersite_links:
for site in self.site_table.values():
for dsa in site.dsa_table.values():
dsa.connect_table = dict((k, v) for k, v in
dsa.connect_table.items()
if site is self.my_site and
v.is_rodc_topology())
self.plot_all_connections('dsa_forgotten_all')
if attempt_live_connections:
# Encapsulates lp and creds in a function that
# attempts connections to remote DSAs.
def ping(self, dnsname):
try:
drs_utils.drsuapi_connect(dnsname, self.lp, self.creds)
except drs_utils.drsException:
return False
return True
else:
ping = None
# These are the published steps (in order) for the
# MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)
# Step 1
self.refresh_failed_links_connections(ping)
# Step 2
self.intrasite()
# Step 3
all_connected = self.intersite(ping)
# Step 4
self.remove_unneeded_ntdsconn(all_connected)
# Step 5
self.translate_ntdsconn()
# Step 6
self.remove_unneeded_failed_links_connections()
# Step 7
self.update_rodc_connection()
if self.verify or self.dot_file_dir is not None:
self.plot_all_connections('dsa_final',
('connected',))
debug.DEBUG_MAGENTA("there are %d dsa guids" %
len(guid_to_dnstr))
dot_edges = []
edge_colors = []
my_dnstr = self.my_dsa.dsa_dnstr
current_reps, needed_reps = self.my_dsa.get_rep_tables()
for dnstr, n_rep in needed_reps.items():
for reps_from in n_rep.rep_repsFrom:
guid_str = str(reps_from.source_dsa_obj_guid)
dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
edge_colors.append('#' + str(n_rep.nc_guid)[:6])
verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
label=self.my_dsa_dnstr,
properties=(), debug=DEBUG, verify=self.verify,
dot_file_dir=self.dot_file_dir,
edge_colors=edge_colors)
dot_edges = []
for site in self.site_table.values():
for dsa in site.dsa_table.values():
current_reps, needed_reps = dsa.get_rep_tables()
for n_rep in needed_reps.values():
for reps_from in n_rep.rep_repsFrom:
dsa_guid = str(reps_from.source_dsa_obj_guid)
dsa_dn = guid_to_dnstr[dsa_guid]
dot_edges.append((dsa.dsa_dnstr, dsa_dn))
verify_and_dot('dsa_repsFrom_final_all', dot_edges,
directed=True, label=self.my_dsa_dnstr,
properties=(), debug=DEBUG, verify=self.verify,
dot_file_dir=self.dot_file_dir)
except:
raise
return 0
def import_ldif(self, dburl, lp, ldif_file, forced_local_dsa=None):
"""Import relevant objects and attributes from an LDIF file.
The point of this function is to allow a programmer/debugger to
import an LDIF file with non-security relevant information that
was previously extracted from a DC database. The LDIF file is used
to create a temporary abbreviated database. The KCC algorithm can
then run against this abbreviated database for debug or test
verification that the topology generated is computationally the
same between different OSes and algorithms.
:param dburl: path to the temporary abbreviated db to create
:param lp: a loadparm object.
:param ldif_file: path to the ldif file to import
:param forced_local_dsa: perform KCC from this DSA's point of view
:return: zero on success, 1 on error
"""
try:
self.samdb = ldif_import_export.ldif_to_samdb(dburl, lp, ldif_file,
forced_local_dsa)
except ldif_import_export.LdifError as e:
logger.critical(e)
return 1
return 0
def export_ldif(self, dburl, lp, creds, ldif_file):
"""Save KCC relevant details to an ldif file
The point of this function is to allow a programmer/debugger to
extract an LDIF file with non-security relevant information from
a DC database. The LDIF file can then be imported via the
import_ldif() function into a temporary abbreviated
database. The KCC algorithm can then run against this abbreviated
database for debug or test verification that the topology generated
is computationally the same between different OSes and algorithms.
:param dburl: LDAP database URL to extract info from
:param lp: a loadparm object.
:param creds: a Credentials object.
:param ldif_file: output LDIF file name to create
:return: zero on success, 1 on error
"""
try:
ldif_import_export.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
ldif_file)
except ldif_import_export.LdifError as e:
logger.critical(e)
return 1
return 0
| gpl-3.0 |
stephen144/odoo | openerp/report/preprocess.py | 49 | 3753 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from lxml import etree
import re
rml_parents = ['tr','story','section']
html_parents = ['tr','body','div']
sxw_parents = ['{http://openoffice.org/2000/table}table-row','{http://openoffice.org/2000/office}body','{http://openoffice.org/2000/text}section']
odt_parents = ['{urn:oasis:names:tc:opendocument:xmlns:office:1.0}body','{urn:oasis:names:tc:opendocument:xmlns:table:1.0}table-row','{urn:oasis:names:tc:opendocument:xmlns:text:1.0}section']
class report(object):
def preprocess_rml(self, root_node,type='pdf'):
_regex1 = re.compile("\[\[(.*?)(repeatIn\(.*?\s*,\s*[\'\"].*?[\'\"]\s*(?:,\s*(.*?)\s*)?\s*\))(.*?)\]\]")
_regex11= re.compile("\[\[(.*?)(repeatIn\(.*?\s*\(.*?\s*[\'\"].*?[\'\"]\s*\),[\'\"].*?[\'\"](?:,\s*(.*?)\s*)?\s*\))(.*?)\]\]")
_regex2 = re.compile("\[\[(.*?)(removeParentNode\(\s*(?:['\"](.*?)['\"])\s*\))(.*?)\]\]")
_regex3 = re.compile("\[\[\s*(.*?setTag\(\s*['\"](.*?)['\"]\s*,\s*['\"].*?['\"]\s*(?:,.*?)?\).*?)\s*\]\]")
for node in root_node:
if node.tag == etree.Comment:
continue
if node.text or node.tail:
def _sub3(txt):
n = node
while n.tag != txt.group(2):
n = n.getparent()
n.set('rml_tag', txt.group(1))
return "[[ '' ]]"
def _sub2(txt):
if txt.group(3):
n = node
try:
while n.tag != txt.group(3):
n = n.getparent()
except Exception:
n = node
else:
n = node.getparent()
n.set('rml_except', txt.group(0)[2:-2])
return txt.group(0)
def _sub1(txt):
if len(txt.group(4)) > 1:
return " "
match = rml_parents
if type == 'odt':
match = odt_parents
if type == 'sxw':
match = sxw_parents
if type =='html2html':
match = html_parents
if txt.group(3):
group_3 = txt.group(3)
if group_3.startswith("'") or group_3.startswith('"'):
group_3 = group_3[1:-1]
match = [group_3]
n = node
while n.tag not in match:
n = n.getparent()
n.set('rml_loop', txt.group(2))
return '[['+txt.group(1)+"''"+txt.group(4)+']]'
t = _regex1.sub(_sub1, node.text or node.tail)
if t == " ":
t = _regex11.sub(_sub1, node.text or node.tail)
t = _regex3.sub(_sub3, t)
node.text = _regex2.sub(_sub2, t)
self.preprocess_rml(node,type)
return root_node
if __name__=='__main__':
node = etree.XML('''<story>
<para>This is a test[[ setTag('para','xpre') ]]</para>
<blockTable>
<tr>
<td><para>Row 1 [[ setTag('tr','tr',{'style':'TrLevel'+str(a['level']), 'paraStyle':('Level'+str(a['level']))}) ]] </para></td>
<td>Row 2 [[ True and removeParentNode('td') ]] </td>
</tr><tr>
<td>Row 1 [[repeatIn(o.order_line,'o')]] </td>
<td>Row 2</td>
</tr>
</blockTable>
<p>This isa test</p>
</story>''')
a = report()
result = a.preprocess_rml(node)
print etree.tostring(result)
| agpl-3.0 |
cpcloud/numba | numba/tests/test_casting.py | 3 | 2913 | from numba import unittest_support as unittest
import numpy as np
from numba.compiler import compile_isolated
from numba import types, njit
import struct
def float_to_int(x):
return types.int32(x)
def int_to_float(x):
return types.float64(x) / 2
def float_to_unsigned(x):
return types.uint32(x)
def float_to_complex(x):
return types.complex128(x)
class TestCasting(unittest.TestCase):
def test_float_to_int(self):
pyfunc = float_to_int
cr = compile_isolated(pyfunc, [types.float32])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.int32)
self.assertEqual(cfunc(12.3), pyfunc(12.3))
self.assertEqual(cfunc(12.3), int(12.3))
self.assertEqual(cfunc(-12.3), pyfunc(-12.3))
self.assertEqual(cfunc(-12.3), int(-12.3))
def test_int_to_float(self):
pyfunc = int_to_float
cr = compile_isolated(pyfunc, [types.int64])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.float64)
self.assertEqual(cfunc(321), pyfunc(321))
self.assertEqual(cfunc(321), 321. / 2)
def test_float_to_unsigned(self):
pyfunc = float_to_unsigned
cr = compile_isolated(pyfunc, [types.float32])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.uint32)
self.assertEqual(cfunc(3.21), pyfunc(3.21))
self.assertEqual(cfunc(3.21), struct.unpack('I', struct.pack('i',
3))[0])
def test_float_to_complex(self):
pyfunc = float_to_complex
cr = compile_isolated(pyfunc, [types.float64])
cfunc = cr.entry_point
self.assertEqual(cr.signature.return_type, types.complex128)
self.assertEqual(cfunc(-3.21), pyfunc(-3.21))
self.assertEqual(cfunc(-3.21), -3.21 + 0j)
def test_array_to_array(self):
"""Make sure this compiles.
Cast C to A array
"""
@njit("f8(f8[:])")
def inner(x):
return x[0]
inner.disable_compile()
@njit("f8(f8[::1])")
def driver(x):
return inner(x)
x = np.array([1234], dtype=np.float64)
self.assertEqual(driver(x), x[0])
self.assertEqual(len(inner.overloads), 1)
def test_optional_to_optional(self):
"""
Test error due mishandling of Optional to Optional casting
Related issue: https://github.com/numba/numba/issues/1718
"""
# Attempt to cast optional(intp) to optional(float64)
opt_int = types.Optional(types.intp)
opt_flt = types.Optional(types.float64)
sig = opt_flt(opt_int)
@njit(sig)
def foo(a):
return a
self.assertEqual(foo(2), 2)
self.assertIsNone(foo(None))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
ojengwa/odoo | addons/association/__init__.py | 886 | 1054 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
delinhabit/django | django/shortcuts.py | 129 | 7958 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
import warnings
from django.core import urlresolvers
from django.db.models.base import ModelBase
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import RequestContext, loader
from django.template.context import _current_app_undefined
from django.template.engine import (
_context_instance_undefined, _dictionary_undefined, _dirs_undefined,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, dirs=_dirs_undefined,
dictionary=_dictionary_undefined, using=None):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
"""
if (context_instance is _context_instance_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
content = loader.render_to_string(template_name, context, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def render(request, template_name, context=None,
context_instance=_context_instance_undefined,
content_type=None, status=None, current_app=_current_app_undefined,
dirs=_dirs_undefined, dictionary=_dictionary_undefined,
using=None):
"""
Returns a HttpResponse whose content is filled with the result of calling
django.template.loader.render_to_string() with the passed arguments.
Uses a RequestContext by default.
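Example (illustrative usage; template and context names are made up):

    return render(request, 'polls/index.html', {'latest': latest_questions})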
"""
if (context_instance is _context_instance_undefined
and current_app is _current_app_undefined
and dirs is _dirs_undefined
and dictionary is _dictionary_undefined):
# No deprecated arguments were passed - use the new code path
# In Django 1.10, request should become a positional argument.
content = loader.render_to_string(
template_name, context, request=request, using=using)
else:
# Some deprecated arguments were passed - use the legacy code path
if context_instance is not _context_instance_undefined:
if current_app is not _current_app_undefined:
raise ValueError('If you provide a context_instance you must '
'set its current_app before calling render()')
else:
context_instance = RequestContext(request)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of render is deprecated. "
"Set the current_app attribute of request instead.",
RemovedInDjango110Warning, stacklevel=2)
request.current_app = current_app
# Directly set the private attribute to avoid triggering the
# warning in RequestContext.__init__.
context_instance._current_app = current_app
content = loader.render_to_string(
template_name, context, context_instance, dirs, dictionary,
using=using)
return HttpResponse(content, content_type, status)
def redirect(to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate URL for the arguments
passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be used as-is for the redirect location.
    Issues a temporary redirect by default; pass permanent=True to issue a
    permanent redirect.
"""
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
return redirect_class(resolve_url(to, *args, **kwargs))
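# Illustrative sketch (not part of the original module): typical ways a view
# might call redirect(). The Product model and the 'shop-index' URL name are
# assumptions made up for this example only.
#
#     def buy(request, pk):
#         product = get_object_or_404(Product, pk=pk)
#         return redirect(product)                       # uses get_absolute_url()
#
#     def old_entry_point(request):
#         return redirect('shop-index', permanent=True)  # reverse() + 301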
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Raises a ValueError if klass is not a Model, Manager, or QuerySet.
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, Manager):
manager = klass
elif isinstance(klass, ModelBase):
manager = klass._default_manager
else:
if isinstance(klass, type):
klass__name = klass.__name__
else:
klass__name = klass.__class__.__name__
raise ValueError("Object is of type '%s', but must be a Django Model, "
"Manager, or QuerySet" % klass__name)
return manager.all()
def get_object_or_404(klass, *args, **kwargs):
"""
    Uses get() to return an object, or raises an Http404 exception if the object
does not exist.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the get() query.
    Note: As with get(), a MultipleObjectsReturned exception will be raised if
    more than one object is found.
"""
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
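# Illustrative sketch (not part of the original module): get_object_or_404
# accepts a Model, Manager, or QuerySet plus get()-style lookups. The Author
# model and its fields below are assumptions for the example only.
#
#     author = get_object_or_404(Author, pk=author_id)
#     active = get_object_or_404(Author.objects.filter(is_active=True), slug=slug)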
def get_list_or_404(klass, *args, **kwargs):
"""
    Uses filter() to return a list of objects, or raises an Http404 exception if
the list is empty.
klass may be a Model, Manager, or QuerySet object. All other passed
arguments and keyword arguments are used in the filter() query.
"""
queryset = _get_queryset(klass)
obj_list = list(queryset.filter(*args, **kwargs))
if not obj_list:
raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
return obj_list
def resolve_url(to, *args, **kwargs):
"""
Return a URL appropriate for the arguments passed.
The arguments could be:
* A model: the model's `get_absolute_url()` function will be called.
* A view name, possibly with arguments: `urlresolvers.reverse()` will
be used to reverse-resolve the name.
* A URL, which will be returned as-is.
"""
# If it's a model, use get_absolute_url()
if hasattr(to, 'get_absolute_url'):
return to.get_absolute_url()
if isinstance(to, Promise):
# Expand the lazy instance, as it can cause issues when it is passed
# further to some Python functions like urlparse.
to = force_text(to)
if isinstance(to, six.string_types):
# Handle relative URLs
if to.startswith(('./', '../')):
return to
# Next try a reverse URL resolution.
try:
return urlresolvers.reverse(to, args=args, kwargs=kwargs)
except urlresolvers.NoReverseMatch:
# If this is a callable, re-raise.
if callable(to):
raise
# If this doesn't "feel" like a URL, re-raise.
if '/' not in to and '.' not in to:
raise
# Finally, fall back and assume it's a URL
return to
| bsd-3-clause |
lra/boto | boto/services/__init__.py | 782 | 1108 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
pschmitt/home-assistant | tests/components/stream/test_init.py | 5 | 3066 | """The tests for stream."""
import pytest
from homeassistant.components.stream.const import (
ATTR_STREAMS,
CONF_LOOKBACK,
CONF_STREAM_SOURCE,
DOMAIN,
SERVICE_RECORD,
)
from homeassistant.const import CONF_FILENAME
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, MagicMock, patch
async def test_record_service_invalid_file(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
data = {CONF_STREAM_SOURCE: "rtsp://my.video", CONF_FILENAME: "/my/invalid/path"}
with pytest.raises(HomeAssistantError):
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
async def test_record_service_init_stream(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
data = {CONF_STREAM_SOURCE: "rtsp://my.video", CONF_FILENAME: "/my/invalid/path"}
with patch("homeassistant.components.stream.Stream") as stream_mock, patch.object(
hass.config, "is_allowed_path", return_value=True
):
# Setup stubs
stream_mock.return_value.outputs = {}
# Call Service
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
# Assert
assert stream_mock.called
async def test_record_service_existing_record_session(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
source = "rtsp://my.video"
data = {CONF_STREAM_SOURCE: source, CONF_FILENAME: "/my/invalid/path"}
# Setup stubs
stream_mock = MagicMock()
stream_mock.return_value.outputs = {"recorder": MagicMock()}
hass.data[DOMAIN][ATTR_STREAMS][source] = stream_mock
with patch.object(hass.config, "is_allowed_path", return_value=True), pytest.raises(
HomeAssistantError
):
# Call Service
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
async def test_record_service_lookback(hass):
"""Test record service call with invalid file."""
await async_setup_component(hass, "stream", {"stream": {}})
data = {
CONF_STREAM_SOURCE: "rtsp://my.video",
CONF_FILENAME: "/my/invalid/path",
CONF_LOOKBACK: 4,
}
with patch("homeassistant.components.stream.Stream") as stream_mock, patch.object(
hass.config, "is_allowed_path", return_value=True
):
# Setup stubs
hls_mock = MagicMock()
hls_mock.num_segments = 3
hls_mock.target_duration = 2
hls_mock.recv = AsyncMock(return_value=None)
stream_mock.return_value.outputs = {"hls": hls_mock}
# Call Service
await hass.services.async_call(DOMAIN, SERVICE_RECORD, data, blocking=True)
assert stream_mock.called
stream_mock.return_value.add_provider.assert_called_once_with("recorder")
assert hls_mock.recv.called
| apache-2.0 |
AMOboxTV/AMOBox.LegoBuild | script.module.unidecode/lib/unidecode/x0cd.py | 253 | 4738 | data = (
'cyess', # 0x00
'cyeng', # 0x01
'cyej', # 0x02
'cyec', # 0x03
'cyek', # 0x04
'cyet', # 0x05
'cyep', # 0x06
'cyeh', # 0x07
'co', # 0x08
'cog', # 0x09
'cogg', # 0x0a
'cogs', # 0x0b
'con', # 0x0c
'conj', # 0x0d
'conh', # 0x0e
'cod', # 0x0f
'col', # 0x10
'colg', # 0x11
'colm', # 0x12
'colb', # 0x13
'cols', # 0x14
'colt', # 0x15
'colp', # 0x16
'colh', # 0x17
'com', # 0x18
'cob', # 0x19
'cobs', # 0x1a
'cos', # 0x1b
'coss', # 0x1c
'cong', # 0x1d
'coj', # 0x1e
'coc', # 0x1f
'cok', # 0x20
'cot', # 0x21
'cop', # 0x22
'coh', # 0x23
'cwa', # 0x24
'cwag', # 0x25
'cwagg', # 0x26
'cwags', # 0x27
'cwan', # 0x28
'cwanj', # 0x29
'cwanh', # 0x2a
'cwad', # 0x2b
'cwal', # 0x2c
'cwalg', # 0x2d
'cwalm', # 0x2e
'cwalb', # 0x2f
'cwals', # 0x30
'cwalt', # 0x31
'cwalp', # 0x32
'cwalh', # 0x33
'cwam', # 0x34
'cwab', # 0x35
'cwabs', # 0x36
'cwas', # 0x37
'cwass', # 0x38
'cwang', # 0x39
'cwaj', # 0x3a
'cwac', # 0x3b
'cwak', # 0x3c
'cwat', # 0x3d
'cwap', # 0x3e
'cwah', # 0x3f
'cwae', # 0x40
'cwaeg', # 0x41
'cwaegg', # 0x42
'cwaegs', # 0x43
'cwaen', # 0x44
'cwaenj', # 0x45
'cwaenh', # 0x46
'cwaed', # 0x47
'cwael', # 0x48
'cwaelg', # 0x49
'cwaelm', # 0x4a
'cwaelb', # 0x4b
'cwaels', # 0x4c
'cwaelt', # 0x4d
'cwaelp', # 0x4e
'cwaelh', # 0x4f
'cwaem', # 0x50
'cwaeb', # 0x51
'cwaebs', # 0x52
'cwaes', # 0x53
'cwaess', # 0x54
'cwaeng', # 0x55
'cwaej', # 0x56
'cwaec', # 0x57
'cwaek', # 0x58
'cwaet', # 0x59
'cwaep', # 0x5a
'cwaeh', # 0x5b
'coe', # 0x5c
'coeg', # 0x5d
'coegg', # 0x5e
'coegs', # 0x5f
'coen', # 0x60
'coenj', # 0x61
'coenh', # 0x62
'coed', # 0x63
'coel', # 0x64
'coelg', # 0x65
'coelm', # 0x66
'coelb', # 0x67
'coels', # 0x68
'coelt', # 0x69
'coelp', # 0x6a
'coelh', # 0x6b
'coem', # 0x6c
'coeb', # 0x6d
'coebs', # 0x6e
'coes', # 0x6f
'coess', # 0x70
'coeng', # 0x71
'coej', # 0x72
'coec', # 0x73
'coek', # 0x74
'coet', # 0x75
'coep', # 0x76
'coeh', # 0x77
'cyo', # 0x78
'cyog', # 0x79
'cyogg', # 0x7a
'cyogs', # 0x7b
'cyon', # 0x7c
'cyonj', # 0x7d
'cyonh', # 0x7e
'cyod', # 0x7f
'cyol', # 0x80
'cyolg', # 0x81
'cyolm', # 0x82
'cyolb', # 0x83
'cyols', # 0x84
'cyolt', # 0x85
'cyolp', # 0x86
'cyolh', # 0x87
'cyom', # 0x88
'cyob', # 0x89
'cyobs', # 0x8a
'cyos', # 0x8b
'cyoss', # 0x8c
'cyong', # 0x8d
'cyoj', # 0x8e
'cyoc', # 0x8f
'cyok', # 0x90
'cyot', # 0x91
'cyop', # 0x92
'cyoh', # 0x93
'cu', # 0x94
'cug', # 0x95
'cugg', # 0x96
'cugs', # 0x97
'cun', # 0x98
'cunj', # 0x99
'cunh', # 0x9a
'cud', # 0x9b
'cul', # 0x9c
'culg', # 0x9d
'culm', # 0x9e
'culb', # 0x9f
'culs', # 0xa0
'cult', # 0xa1
'culp', # 0xa2
'culh', # 0xa3
'cum', # 0xa4
'cub', # 0xa5
'cubs', # 0xa6
'cus', # 0xa7
'cuss', # 0xa8
'cung', # 0xa9
'cuj', # 0xaa
'cuc', # 0xab
'cuk', # 0xac
'cut', # 0xad
'cup', # 0xae
'cuh', # 0xaf
'cweo', # 0xb0
'cweog', # 0xb1
'cweogg', # 0xb2
'cweogs', # 0xb3
'cweon', # 0xb4
'cweonj', # 0xb5
'cweonh', # 0xb6
'cweod', # 0xb7
'cweol', # 0xb8
'cweolg', # 0xb9
'cweolm', # 0xba
'cweolb', # 0xbb
'cweols', # 0xbc
'cweolt', # 0xbd
'cweolp', # 0xbe
'cweolh', # 0xbf
'cweom', # 0xc0
'cweob', # 0xc1
'cweobs', # 0xc2
'cweos', # 0xc3
'cweoss', # 0xc4
'cweong', # 0xc5
'cweoj', # 0xc6
'cweoc', # 0xc7
'cweok', # 0xc8
'cweot', # 0xc9
'cweop', # 0xca
'cweoh', # 0xcb
'cwe', # 0xcc
'cweg', # 0xcd
'cwegg', # 0xce
'cwegs', # 0xcf
'cwen', # 0xd0
'cwenj', # 0xd1
'cwenh', # 0xd2
'cwed', # 0xd3
'cwel', # 0xd4
'cwelg', # 0xd5
'cwelm', # 0xd6
'cwelb', # 0xd7
'cwels', # 0xd8
'cwelt', # 0xd9
'cwelp', # 0xda
'cwelh', # 0xdb
'cwem', # 0xdc
'cweb', # 0xdd
'cwebs', # 0xde
'cwes', # 0xdf
'cwess', # 0xe0
'cweng', # 0xe1
'cwej', # 0xe2
'cwec', # 0xe3
'cwek', # 0xe4
'cwet', # 0xe5
'cwep', # 0xe6
'cweh', # 0xe7
'cwi', # 0xe8
'cwig', # 0xe9
'cwigg', # 0xea
'cwigs', # 0xeb
'cwin', # 0xec
'cwinj', # 0xed
'cwinh', # 0xee
'cwid', # 0xef
'cwil', # 0xf0
'cwilg', # 0xf1
'cwilm', # 0xf2
'cwilb', # 0xf3
'cwils', # 0xf4
'cwilt', # 0xf5
'cwilp', # 0xf6
'cwilh', # 0xf7
'cwim', # 0xf8
'cwib', # 0xf9
'cwibs', # 0xfa
'cwis', # 0xfb
'cwiss', # 0xfc
'cwing', # 0xfd
'cwij', # 0xfe
'cwic', # 0xff
)
| gpl-2.0 |
wkentaro/legit | legit/cli.py | 1 | 18417 | # -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import sys
from subprocess import call
from time import sleep
import clint.resources
try:
from clint import Args
args = Args()
except ImportError:
from clint import args
from clint.eng import join as eng_join
from clint.textui import colored, puts, columns, indent
from .core import __version__
from .settings import settings
from .helpers import is_lin, is_osx, is_win
from .scm import *
def black(s):
if settings.allow_black_foreground:
return colored.black(s)
else:
return s.encode('utf-8')
# --------
# Dispatch
# --------
def main():
"""Primary Legit command dispatch."""
command = Command.lookup(args.get(0))
if command:
arg = args.get(0)
args.remove(arg)
command.__call__(args)
sys.exit()
elif args.contains(('-h', '--help')):
display_help()
sys.exit(1)
elif args.contains(('-v', '--version')):
display_version()
sys.exit(1)
else:
if settings.git_transparency:
# Send everything to git
git_args = list(sys.argv)
if settings.git_transparency is True:
settings.git_transparency = os.environ.get("GIT_PYTHON_GIT_EXECUTABLE", 'git')
git_args[0] = settings.git_transparency
sys.exit(call(' '.join(git_args), shell=True))
else:
show_error(colored.red('Unknown command {0}'.format(args.get(0))))
display_info()
sys.exit(1)
def show_error(msg):
sys.stdout.flush()
sys.stderr.write(msg + '\n')
# -------
# Helpers
# -------
def status_log(func, message, *args, **kwargs):
"""Executes a callable with a header message."""
print(message)
log = func(*args, **kwargs)
if log:
out = []
for line in log.split('\n'):
if not line.startswith('#'):
out.append(line)
print(black('\n'.join(out)))
def switch_to(branch):
"""Runs the cmd_switch command with given branch arg."""
switch_args = args.copy
switch_args._args = [branch]
return cmd_switch(switch_args)
def fuzzy_match_branch(branch):
if not branch: return False
all_branches = get_branch_names()
if branch in all_branches:
return branch
def branch_fuzzy_match(b): return b.startswith(branch)
possible_branches = filter(branch_fuzzy_match, all_branches)
if len(possible_branches) == 1:
return possible_branches[0]
return False
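# Illustrative note (not part of the original module): fuzzy_match_branch
# resolves a unique prefix to a full branch name. For example, with branches
# ['master', 'feature/login'] (made-up names), fuzzy_match_branch('fea')
# would return 'feature/login', while an ambiguous or unknown prefix
# returns False.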
# --------
# Commands
# --------
def cmd_switch(args):
"""Legit Switch command."""
from_branch = get_current_branch_name()
to_branch = args.get(0)
to_branch = fuzzy_match_branch(to_branch)
if not to_branch:
print('Please specify a branch to switch to:')
display_available_branches()
sys.exit()
if repo.is_dirty():
status_log(stash_it, 'Saving local changes.')
status_log(checkout_branch, 'Switching to {0}.'.format(
colored.yellow(to_branch)), to_branch)
if unstash_index(branch=from_branch):
status_log(unstash_it, 'Restoring local changes.', branch=from_branch)
def cmd_resync(args):
"""Stashes unstaged changes,
Fetches, Auto-Merge/Rebase upstream data from specified upstream branch,
Performs smart pull+merge for current branch,
Pushes local commits up, and Unstashes changes.
Default upstream branch is 'master'.
"""
if args.get(0):
upstream = fuzzy_match_branch(args.get(0))
if upstream:
is_external = True
original_branch = get_current_branch_name()
else:
print("{0} doesn't exist. Use a branch that does.".format(
colored.yellow(args.get(0))))
sys.exit(1)
else:
upstream = "master"
original_branch = get_current_branch_name()
if repo.is_dirty():
status_log(stash_it, 'Saving local changes.', sync=True)
# Update upstream branch
switch_to(upstream)
status_log(smart_pull, 'Pulling commits from the server.')
# Update original branch with upstream
switch_to(original_branch)
status_log(smart_merge, 'Grafting commits from {0}.'.format(
colored.yellow(upstream)), upstream, allow_rebase=False)
if unstash_index(sync=True):
status_log(unstash_it, 'Restoring local changes.', sync=True)
# Sync original_branch
status_log(smart_pull, 'Pulling commits from the server.')
status_log(push, 'Pushing commits to the server.', original_branch)
def cmd_sync(args):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
if args.get(0):
# Optional branch specifier.
branch = fuzzy_match_branch(args.get(0))
if branch:
is_external = True
original_branch = get_current_branch_name()
else:
print("{0} doesn't exist. Use a branch that does.".format(
colored.yellow(args.get(0))))
sys.exit(1)
else:
# Sync current branch.
branch = get_current_branch_name()
is_external = False
if branch in get_branch_names(local=False):
if is_external:
switch_to(branch)
if repo.is_dirty():
status_log(stash_it, 'Saving local changes.', sync=True)
status_log(smart_pull, 'Pulling commits from the server.')
status_log(push, 'Pushing commits to the server.', branch)
if unstash_index(sync=True):
status_log(unstash_it, 'Restoring local changes.', sync=True)
if is_external:
switch_to(original_branch)
else:
print('{0} has not been published yet.'.format(
colored.yellow(branch)))
sys.exit(1)
def cmd_sprout(args):
"""Creates a new branch of given name from given branch.
Defaults to current branch.
"""
off_branch = args.get(0)
new_branch = args.get(1)
if (off_branch is None) and (new_branch is None):
        # new_branch is required, so at least one arg should be passed
show_error('Please pass new branch name to create.')
help('sprout', to_stderr=True)
sys.exit(1)
elif new_branch is None:
# off_branch is optional, so use specified one as new_branch
new_branch = args.get(0)
off_branch = get_current_branch_name()
else:
off_branch = fuzzy_match_branch(off_branch)
branch_names = get_branch_names()
if off_branch not in branch_names:
print("{0} doesn't exist. Use a branch that does.".format(
colored.yellow(off_branch)))
sys.exit(1)
if new_branch in branch_names:
print("{0} already exists. Use a unique name.".format(
colored.yellow(new_branch)))
sys.exit(1)
if repo.is_dirty():
status_log(stash_it, 'Saving local changes.')
status_log(sprout_branch, 'Branching {0} to {1}.'.format(
colored.yellow(off_branch), colored.yellow(new_branch)),
off_branch, new_branch)
def cmd_graft(args):
"""Merges an unpublished branch into the given branch, then deletes it."""
branch = fuzzy_match_branch(args.get(0))
into_branch = args.get(1)
if not branch:
print('Please specify a branch to graft:')
display_available_branches()
sys.exit()
if not into_branch:
into_branch = get_current_branch_name()
else:
into_branch = fuzzy_match_branch(into_branch)
branch_names = get_branch_names(local=True, remote_branches=False)
remote_branch_names = get_branch_names(local=False, remote_branches=True)
if branch not in branch_names:
print("{0} doesn't exist. Use a branch that does.".format(
colored.yellow(branch)))
sys.exit(1)
if branch in remote_branch_names:
print("{0} is published. To graft it, unpublish it first.".format(
colored.yellow(branch)))
sys.exit(1)
if into_branch not in branch_names:
print("{0} doesn't exist. Use a branch that does.".format(
colored.yellow(into_branch)))
sys.exit(1)
# Go to new branch.
switch_to(into_branch)
status_log(graft_branch, 'Grafting {0} into {1}.'.format(
colored.yellow(branch), colored.yellow(into_branch)), branch)
def cmd_publish(args):
"""Pushes an unpublished branch to a remote repository."""
branch = fuzzy_match_branch(args.get(0))
if not branch:
branch = get_current_branch_name()
display_available_branches()
if args.get(0) is None:
print("Using current branch {0}".format(colored.yellow(branch)))
else:
print("Branch {0} not found, using current branch {1}".format(colored.red(args.get(0)),colored.yellow(branch)))
branch_names = get_branch_names(local=False)
if branch in branch_names:
print("{0} is already published. Use a branch that isn't.".format(
colored.yellow(branch)))
sys.exit(1)
status_log(publish_branch, 'Publishing {0}.'.format(
colored.yellow(branch)), branch)
def cmd_unpublish(args):
"""Removes a published branch from the remote repository."""
branch = fuzzy_match_branch(args.get(0))
if not branch:
print('Please specify a branch to unpublish:')
display_available_branches()
sys.exit()
branch_names = get_branch_names(local=False)
if branch not in branch_names:
print("{0} isn't published. Use a branch that is.".format(
colored.yellow(branch)))
sys.exit(1)
status_log(unpublish_branch, 'Unpublishing {0}.'.format(
colored.yellow(branch)), branch)
def cmd_harvest(args):
"""Syncs a branch with given branch. Defaults to current."""
from_branch = fuzzy_match_branch(args.get(0))
to_branch = fuzzy_match_branch(args.get(1))
if not from_branch:
print('Please specify a branch to harvest commits from:')
display_available_branches()
sys.exit()
if to_branch:
original_branch = get_current_branch_name()
is_external = True
else:
is_external = False
branch_names = get_branch_names(local=True, remote_branches=False)
if from_branch not in branch_names:
print("{0} isn't an available branch. Use a branch that is.".format(
colored.yellow(from_branch)))
sys.exit(1)
if is_external:
switch_to(to_branch)
if repo.is_dirty():
status_log(stash_it, 'Saving local changes.')
status_log(smart_merge, 'Grafting commits from {0}.'.format(
colored.yellow(from_branch)), from_branch, allow_rebase=False)
if is_external:
switch_to(original_branch)
if unstash_index():
status_log(unstash_it, 'Restoring local changes.')
#
def cmd_branches(args):
"""Displays available branches."""
display_available_branches()
def cmd_settings(args):
"""Opens legit settings in editor."""
path = clint.resources.user.open('config.ini').name
print('Legit Settings:\n')
for (option, _, description) in settings.config_defaults:
print(columns([colored.yellow(option), 25], [description, None]))
print('\nSee {0} for more details.'.format(settings.config_url))
sleep(0.35)
if is_osx:
editor = os.environ.get('EDITOR') or os.environ.get('VISUAL') or 'open'
os.system("{0} '{1}'".format(editor, path))
elif is_lin:
editor = os.environ.get('EDITOR') or os.environ.get('VISUAL') or 'pico'
os.system("{0} '{1}'".format(editor, path))
elif is_win:
os.system("'{0}'".format(path))
else:
print("Edit '{0}' to manage Legit settings.\n".format(path))
sys.exit()
def cmd_install(args):
"""Installs legit git aliases."""
aliases = [
'branches',
'graft',
'harvest',
'publish',
'unpublish',
'sprout',
'sync',
'switch',
'resync',
]
print('The following git aliases have been installed:\n')
for alias in aliases:
cmd = '!legit ' + alias
os.system('git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd))
print(columns(['', 1], [colored.yellow('git ' + alias), 20], [cmd, None]))
sys.exit()
def cmd_help(args):
"""Display help for individual commands."""
command = args.get(0)
help(command)
# -----
# Views
# -----
def help(command, to_stderr=False):
if command == None:
command = 'help'
cmd = Command.lookup(command)
usage = cmd.usage or ''
help = cmd.help or ''
help_text = '%s\n\n%s' % (usage, help)
if to_stderr:
show_error(help_text)
else:
print(help_text)
def display_available_branches():
"""Displays available branches."""
branches = get_branches()
if not branches:
print(colored.red('No branches available'))
return
branch_col = len(max([b.name for b in branches], key=len)) + 1
for branch in branches:
try:
branch_is_selected = (branch.name == get_current_branch_name())
except TypeError:
branch_is_selected = False
marker = '*' if branch_is_selected else ' '
color = colored.green if branch_is_selected else colored.yellow
pub = '(published)' if branch.is_published else '(unpublished)'
print(columns(
[colored.red(marker), 2],
[color(branch.name), branch_col],
[black(pub), 14]
))
def display_info():
"""Displays Legit informatics."""
puts('{0}. {1}\n'.format(
colored.red('legit'),
black('A Kenneth Reitz Project')
))
puts('Usage: {0}\n'.format(colored.blue('legit <command>')))
puts('Commands:\n')
for command in Command.all_commands():
usage = command.usage or command.name
detail = command.help or ''
puts(colored.green(usage))
with indent(2):
puts(first_sentence(detail))
def first_sentence(s):
pos = s.find('. ')
if pos < 0:
pos = len(s) - 1
return s[:pos + 1]
def display_help():
"""Displays Legit help."""
display_info()
def display_version():
"""Displays Legit version/release."""
puts('{0} v{1}'.format(
colored.yellow('legit'),
__version__
))
def handle_abort(aborted, type=None):
print('{0} {1}'.format(colored.red('Error:'), aborted.message))
print(black(str(aborted.log)))
if type == 'merge':
print('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
print('It seems that the remote branch has been already deleted.')
sys.exit(1)
settings.abort_handler = handle_abort
class Command(object):
COMMANDS = {}
SHORT_MAP = {}
@classmethod
def register(klass, command):
klass.COMMANDS[command.name] = command
if command.short:
for short in command.short:
klass.SHORT_MAP[short] = command
@classmethod
def lookup(klass, name):
if name in klass.SHORT_MAP:
return klass.SHORT_MAP[name]
if name in klass.COMMANDS:
return klass.COMMANDS[name]
else:
return None
@classmethod
def all_commands(klass):
return sorted(klass.COMMANDS.values(),
key=lambda cmd: cmd.name)
def __init__(self, name=None, short=None, fn=None, usage=None, help=None):
self.name = name
self.short = short
self.fn = fn
self.usage = usage
self.help = help
def __call__(self, *args, **kw_args):
return self.fn(*args, **kw_args)
def def_cmd(name=None, short=None, fn=None, usage=None, help=None):
command = Command(name=name, short=short, fn=fn, usage=usage, help=help)
Command.register(command)
def_cmd(
name='branches',
fn=cmd_branches,
usage='branches',
help='Get a nice pretty list of branches.')
def_cmd(
name='graft',
short=['gr'],
fn=cmd_graft,
usage='graft <branch> <into-branch>',
help=('Merges specified branch into the second branch, and removes it. '
'You can only graft unpublished branches.'))
def_cmd(
name='harvest',
short=['ha', 'hv', 'har'],
usage='harvest [<branch>] <into-branch>',
help=('Auto-Merge/Rebase of specified branch changes into the second '
'branch.'),
fn=cmd_harvest)
def_cmd(
name='help',
short=['h'],
fn=cmd_help,
usage='help <command>',
help='Display help for legit command.')
def_cmd(
name='install',
fn=cmd_install,
usage='install',
help='Installs legit git aliases.')
def_cmd(
name='publish',
short=['pub'],
fn=cmd_publish,
usage='publish [<branch>]',
help='Publishes specified branch to the remote.')
def_cmd(
name='settings',
fn=cmd_settings,
usage='settings',
help='Opens legit settings in a text editor.')
def_cmd(
name='sprout',
short=['sp'],
fn=cmd_sprout,
usage='sprout [<branch>] <new-branch>',
help=('Creates a new branch off of the specified branch. Defaults to '
'current branch. Switches to it immediately.'))
def_cmd(
name='switch',
short=['sw'],
fn=cmd_switch,
usage='switch <branch>',
help=('Switches to specified branch. Automatically stashes and unstashes '
'any changes.'))
def_cmd(
name='sync',
short=['sy'],
fn=cmd_sync,
usage='sync <branch>',
help=('Synchronizes the given branch. Defaults to current branch. Stash, '
'Fetch, Auto-Merge/Rebase, Push, and Unstash.'))
def_cmd(
name='resync',
short=['rs'],
fn=cmd_resync,
usage='resync <upstream-branch>',
help=('Re-synchronize current branch with specified upstream branch. '
"Defaults upstream branch is 'master'. "
'Fetch, Auto-Merge/Rebase for upstream, '
'Fetch, Auto-Merge/Rebase, Push, and Unstash for current branch.'))
def_cmd(
name='unpublish',
short=['unp'],
fn=cmd_unpublish,
usage='unpublish <branch>',
help='Removes specified branch from the remote.')
| bsd-3-clause |
guschmue/tensorflow | tensorflow/python/client/timeline.py | 38 | 24052 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Timeline visualization for TensorFlow using Chrome Trace Format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import re
# The timeline target is usually imported as part of BUILD target
# "platform_test", which includes also includes the "platform"
# dependency. This is why the logging import here is okay.
from tensorflow.python.platform import tf_logging as logging
class AllocationMaximum(collections.namedtuple(
'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))):
"""Stores the maximum allocation for a given allocator within the timelne.
Parameters:
timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached.
num_bytes: the total memory used at this time.
tensors: the set of tensors allocated at this time.
"""
pass
class StepStatsAnalysis(collections.namedtuple(
'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))):
"""Stores the step stats analysis output.
Parameters:
chrome_trace: A dict containing the chrome trace analysis.
allocator_maximums: A dict mapping allocator names to AllocationMaximum.
"""
pass
class _ChromeTraceFormatter(object):
"""A helper class for generating traces in Chrome Trace Format."""
def __init__(self, show_memory=False):
"""Constructs a new Chrome Trace formatter."""
self._show_memory = show_memory
self._events = []
self._metadata = []
def _create_event(self, ph, category, name, pid, tid, timestamp):
"""Creates a new Chrome Trace event.
For details of the file format, see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
Args:
ph: The type of event - usually a single character.
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
Returns:
A JSON compatible event object.
"""
event = {}
event['ph'] = ph
event['cat'] = category
event['name'] = name
event['pid'] = pid
event['tid'] = tid
event['ts'] = timestamp
return event
def emit_pid(self, name, pid):
"""Adds a process metadata event to the trace.
Args:
name: The process name as a string.
pid: Identifier of the process as an integer.
"""
event = {}
event['name'] = 'process_name'
event['ph'] = 'M'
event['pid'] = pid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_tid(self, name, pid, tid):
"""Adds a thread metadata event to the trace.
Args:
name: The thread name as a string.
pid: Identifier of the process as an integer.
tid: Identifier of the thread as an integer.
"""
event = {}
event['name'] = 'thread_name'
event['ph'] = 'M'
event['pid'] = pid
event['tid'] = tid
event['args'] = {'name': name}
self._metadata.append(event)
def emit_region(self, timestamp, duration, pid, tid, category, name, args):
"""Adds a region event to the trace.
Args:
timestamp: The start timestamp of this region as a long integer.
duration: The duration of this region as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
category: The event category as a string.
name: The event name as a string.
args: A JSON-compatible dictionary of event arguments.
"""
event = self._create_event('X', category, name, pid, tid, timestamp)
event['dur'] = duration
event['args'] = args
self._events.append(event)
def emit_obj_create(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object creation event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('N', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id):
"""Adds an object deletion event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
"""
event = self._create_event('D', category, name, pid, tid, timestamp)
event['id'] = object_id
self._events.append(event)
def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id,
snapshot):
"""Adds an object snapshot event to the trace.
Args:
category: The event category as a string.
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
object_id: Identifier of the object as an integer.
snapshot: A JSON-compatible representation of the object.
"""
event = self._create_event('O', category, name, pid, tid, timestamp)
event['id'] = object_id
event['args'] = {'snapshot': snapshot}
self._events.append(event)
def emit_flow_start(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow start event to the trace.
When matched with a flow end event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_flow_end(self, name, timestamp, pid, tid, flow_id):
"""Adds a flow end event to the trace.
When matched with a flow start event (with the same 'flow_id') this will
cause the trace viewer to draw an arrow between the start and end events.
Args:
name: The event name as a string.
timestamp: The timestamp of this event as a long integer.
pid: Identifier of the process generating this event as an integer.
tid: Identifier of the thread generating this event as an integer.
flow_id: Identifier of the flow as an integer.
"""
event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp)
event['id'] = flow_id
self._events.append(event)
def emit_counter(self, category, name, pid, timestamp, counter, value):
"""Emits a record for a single counter.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counter: Name of the counter as a string.
value: Value of the counter as an integer.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = {counter: value}
self._events.append(event)
def emit_counters(self, category, name, pid, timestamp, counters):
"""Emits a counter record for the dictionary 'counters'.
Args:
category: The event category as a string.
name: The event name as a string.
pid: Identifier of the process generating this event as an integer.
timestamp: The timestamp of this event as a long integer.
counters: Dictionary of counter values.
"""
event = self._create_event('C', category, name, pid, 0, timestamp)
event['args'] = counters.copy()
self._events.append(event)
def format_to_string(self, pretty=False):
"""Formats the chrome trace to a string.
Args:
pretty: (Optional.) If True, produce human-readable JSON output.
Returns:
A JSON-formatted string in Chrome Trace format.
"""
trace = {}
trace['traceEvents'] = self._metadata + self._events
if pretty:
return json.dumps(trace, indent=4, separators=(',', ': '))
else:
return json.dumps(trace, separators=(',', ':'))
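  # Illustrative sketch (not part of the original file): format_to_string()
  # produces Chrome Trace Format JSON, roughly of the shape
  #   {"traceEvents": [{"name": "process_name", "ph": "M", "pid": 0,
  #                     "args": {"name": "/cpu:0 Compute"}}, ...]}
  # The pid and process name shown are assumptions for the example only.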
class _TensorTracker(object):
"""An internal class to track the lifetime of a Tensor."""
def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes):
"""Creates an object to track tensor references.
This class is not thread safe and is intended only for internal use by
the 'Timeline' class in this file.
Args:
name: The name of the Tensor as a string.
object_id: Chrome Trace object identifier assigned for this Tensor.
timestamp: The creation timestamp of this event as a long integer.
pid: Process identifier of the associated device, as an integer.
allocator: Name of the allocator used to create the Tensor.
num_bytes: Number of bytes allocated (long integer).
Returns:
A 'TensorTracker' object.
"""
self._name = name
self._pid = pid
self._object_id = object_id
self._create_time = timestamp
self._allocator = allocator
self._num_bytes = num_bytes
self._ref_times = []
self._unref_times = []
@property
def name(self):
"""Name of this tensor."""
return self._name
@property
def pid(self):
"""ID of the process which created this tensor (an integer)."""
return self._pid
@property
def create_time(self):
"""Timestamp when this tensor was created (long integer)."""
return self._create_time
@property
def object_id(self):
"""Returns the object identifier of this tensor (integer)."""
return self._object_id
@property
def num_bytes(self):
"""Size of this tensor in bytes (long integer)."""
return self._num_bytes
@property
def allocator(self):
"""Name of the allocator used to create this tensor (string)."""
return self._allocator
@property
def last_unref(self):
"""Last unreference timestamp of this tensor (long integer)."""
return max(self._unref_times)
def add_ref(self, timestamp):
"""Adds a reference to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object reference as an integer.
"""
self._ref_times.append(timestamp)
def add_unref(self, timestamp):
"""Adds an unref to this tensor with the specified timestamp.
Args:
timestamp: Timestamp of object unreference as an integer.
"""
self._unref_times.append(timestamp)
class Timeline(object):
"""A class for visualizing execution timelines of TensorFlow steps."""
def __init__(self, step_stats, graph=None):
"""Constructs a new Timeline.
A 'Timeline' is used for visualizing the execution of a TensorFlow
computation. It shows the timings and concurrency of execution at
the granularity of TensorFlow Ops.
This class is not thread safe.
Args:
step_stats: The 'StepStats' proto recording execution times.
graph: (Optional) The 'Graph' that was executed.
"""
self._step_stats = step_stats
self._graph = graph
self._chrome_trace = _ChromeTraceFormatter()
self._next_pid = 0
self._device_pids = {} # device name -> pid for compute activity.
self._tensor_pids = {} # device name -> pid for tensors.
self._tensors = {} # tensor_name -> TensorTracker
self._next_flow_id = 0
self._flow_starts = {} # tensor_name -> (timestamp, pid, tid)
self._alloc_times = {} # tensor_name -> ( time, allocator, size )
self._allocator_maximums = {} # allocator name => maximum bytes long
def _alloc_pid(self):
"""Allocate a process Id."""
pid = self._next_pid
self._next_pid += 1
return pid
def _alloc_flow_id(self):
"""Allocate a flow Id."""
flow_id = self._next_flow_id
self._next_flow_id += 1
return flow_id
def _parse_op_label(self, label):
"""Parses the fields in a node timeline label."""
# Expects labels of the form: name = op(arg, arg, ...).
match = re.match(r'(.*) = (.*)\((.*)\)', label)
if match is None:
return 'unknown', 'unknown', []
nn, op, inputs = match.groups()
if not inputs:
inputs = []
else:
inputs = inputs.split(', ')
return nn, op, inputs
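  # Illustrative sketch (not part of the original file): the timeline_label
  # strings parsed above look roughly like
  #   "add_1 = Add(x, y)"
  # which _parse_op_label() turns into ('add_1', 'Add', ['x', 'y']).
  # The node and input names are assumptions for the example only.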
def _assign_lanes(self):
"""Assigns non-overlapping lanes for the activities on each device."""
for device_stats in self._step_stats.dev_stats:
# TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful.
lanes = [0]
for ns in device_stats.node_stats:
l = -1
for (i, lts) in enumerate(lanes):
if ns.all_start_micros > lts:
l = i
lanes[l] = ns.all_start_micros + ns.all_end_rel_micros
break
if l < 0:
l = len(lanes)
lanes.append(ns.all_start_micros + ns.all_end_rel_micros)
ns.thread_id = l
def _emit_op(self, nodestats, pid, is_gputrace):
"""Generates a Chrome Trace event to show Op execution.
Args:
nodestats: The 'NodeExecStats' proto recording op execution.
pid: The pid assigned for the device where this op ran.
is_gputrace: If True then this op came from the GPUTracer.
"""
node_name = nodestats.node_name
start = nodestats.all_start_micros
duration = nodestats.all_end_rel_micros
tid = nodestats.thread_id
inputs = []
if is_gputrace:
# Node names should always have the form 'name:op'.
fields = node_name.split(':') + ['unknown']
node_name, op = fields[:2]
elif node_name == 'RecvTensor':
# RPC tracing does not use the standard timeline_label format.
op = 'RecvTensor'
else:
_, op, inputs = self._parse_op_label(nodestats.timeline_label)
args = {'name': node_name, 'op': op}
for i, iname in enumerate(inputs):
args['input%d' % i] = iname
self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value):
"""Generate Chrome Trace snapshot event for a computed Tensor.
Args:
tensor: A 'TensorTracker' object.
timestamp: The timestamp of this snapshot as a long integer.
pid: The pid assigned for showing the device where this op ran.
tid: The tid of the thread computing the tensor snapshot.
value: A JSON-compliant snapshot of the object.
"""
desc = str(value.tensor_description).replace('"', '')
snapshot = {'tensor_description': desc}
self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid,
tid, tensor.object_id, snapshot)
def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes):
object_id = len(self._tensors)
tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator,
num_bytes)
self._tensors[name] = tensor
return tensor
def _is_gputrace_device(self, device_name):
"""Returns true if this device is part of the GPUTracer logging."""
return '/stream:' in device_name or '/memcpy' in device_name
def _allocate_pids(self):
"""Allocate fake process ids for each device in the StepStats."""
self._allocators_pid = self._alloc_pid()
self._chrome_trace.emit_pid('Allocators', self._allocators_pid)
# Add processes in the Chrome trace to show compute and data activity.
for dev_stats in self._step_stats.dev_stats:
device_pid = self._alloc_pid()
self._device_pids[dev_stats.device] = device_pid
tensors_pid = self._alloc_pid()
self._tensor_pids[dev_stats.device] = tensors_pid
self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid)
self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid)
def _analyze_tensors(self, show_memory):
"""Analyze tensor references to track dataflow."""
for dev_stats in self._step_stats.dev_stats:
device_pid = self._device_pids[dev_stats.device]
tensors_pid = self._tensor_pids[dev_stats.device]
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
node_name = node_stats.node_name
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
for index, output in enumerate(node_stats.output):
if index:
output_name = '%s:%d' % (node_name, index)
else:
output_name = node_name
allocation = output.tensor_description.allocation_description
num_bytes = allocation.requested_bytes
allocator_name = allocation.allocator_name
tensor = self._produce_tensor(output_name, start_time, tensors_pid,
allocator_name, num_bytes)
tensor.add_ref(start_time)
tensor.add_unref(end_time)
self._flow_starts[output_name] = (end_time, device_pid, tid)
if show_memory:
self._chrome_trace.emit_obj_create('Tensor', output_name,
start_time, tensors_pid, tid,
tensor.object_id)
self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid,
output)
def _show_compute(self, show_dataflow):
"""Visualize the computation activity."""
for dev_stats in self._step_stats.dev_stats:
device_name = dev_stats.device
device_pid = self._device_pids[device_name]
is_gputrace = self._is_gputrace_device(device_name)
for node_stats in dev_stats.node_stats:
tid = node_stats.thread_id
start_time = node_stats.all_start_micros
end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros
self._emit_op(node_stats, device_pid, is_gputrace)
if is_gputrace or node_stats.node_name == 'RecvTensor':
continue
_, _, inputs = self._parse_op_label(node_stats.timeline_label)
for input_name in inputs:
if input_name not in self._tensors:
# This can happen when partitioning has inserted a Send/Recv.
# We remove the numeric suffix so that the dataflow appears to
# come from the original node. Ideally, the StepStats would
# contain logging for the Send and Recv nodes.
index = input_name.rfind('/_')
if index > 0:
input_name = input_name[:index]
if input_name in self._tensors:
tensor = self._tensors[input_name]
tensor.add_ref(start_time)
tensor.add_unref(end_time - 1)
if show_dataflow:
# We use a different flow ID for every graph edge.
create_time, create_pid, create_tid = self._flow_starts[
input_name]
# Don't add flows when producer and consumer ops are on the same
# pid/tid since the horizontal arrows clutter the visualization.
if create_pid != device_pid or create_tid != tid:
flow_id = self._alloc_flow_id()
self._chrome_trace.emit_flow_start(input_name, create_time,
create_pid, create_tid,
flow_id)
self._chrome_trace.emit_flow_end(input_name, start_time,
device_pid, tid, flow_id)
else:
logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?',
input_name)
def _show_memory_counters(self):
"""Produce a counter series for each memory allocator."""
# Iterate over all tensor trackers to build a list of allocations and
# frees for each allocator. Then sort the lists and emit a cumulative
# counter series for each allocator.
allocations = {}
for name in self._tensors:
tensor = self._tensors[name]
self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref,
tensor.pid, 0, tensor.object_id)
allocator = tensor.allocator
if allocator not in allocations:
allocations[allocator] = []
num_bytes = tensor.num_bytes
allocations[allocator].append((tensor.create_time, num_bytes, name))
allocations[allocator].append((tensor.last_unref, -num_bytes, name))
alloc_maxes = {}
# Generate a counter series showing total allocations for each allocator.
for allocator in allocations:
alloc_list = allocations[allocator]
alloc_list.sort()
total_bytes = 0
alloc_tensor_set = set()
alloc_maxes[allocator] = AllocationMaximum(
timestamp=0, num_bytes=0, tensors=set())
for time, num_bytes, name in alloc_list:
total_bytes += num_bytes
if num_bytes < 0:
alloc_tensor_set.discard(name)
else:
alloc_tensor_set.add(name)
if total_bytes > alloc_maxes[allocator].num_bytes:
alloc_maxes[allocator] = AllocationMaximum(
timestamp=time,
num_bytes=total_bytes,
tensors=copy.deepcopy(alloc_tensor_set))
self._chrome_trace.emit_counter('Memory', allocator,
self._allocators_pid, time, allocator,
total_bytes)
self._allocator_maximums = alloc_maxes
def analyze_step_stats(self, show_dataflow=True, show_memory=True):
self._allocate_pids()
self._assign_lanes()
self._analyze_tensors(show_memory)
self._show_compute(show_dataflow)
if show_memory:
self._show_memory_counters()
return StepStatsAnalysis(
chrome_trace=self._chrome_trace,
allocator_maximums=self._allocator_maximums)
def generate_chrome_trace_format(self, show_dataflow=True, show_memory=False):
"""Produces a trace in Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
Returns:
A JSON formatted string in Chrome Trace format.
"""
step_stats_analysis = self.analyze_step_stats(
show_dataflow=show_dataflow, show_memory=show_memory)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
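# Illustrative sketch (not part of the original file), assuming the usual
# run-metadata workflow: collect step stats with full tracing, then write a
# Chrome Trace file that can be opened in chrome://tracing. The session and
# fetch names are assumptions for the example only.
#
#     run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
#     run_metadata = tf.RunMetadata()
#     sess.run(fetches, options=run_options, run_metadata=run_metadata)
#     tl = Timeline(run_metadata.step_stats, graph=sess.graph)
#     with open('timeline.json', 'w') as f:
#         f.write(tl.generate_chrome_trace_format(show_memory=True))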
| apache-2.0 |
demiaster/rman | scene.py | 1 | 20055 | #! /usr/bin/python
import getpass
import time
# import the python functions
import sys,os.path,subprocess
# import the python renderman library
import prman
###-------------------------------Function Section--------------------------###
"""
Jon Macey's function
Function to check if a shader exists and compile it. We assume that the shader
source is .osl and the compiled shader is .oso. If the shader source is newer
than the compiled shader we will recompile it. It also assumes that oslc is in
the path.
"""
def checkAndCompileShader(shader) :
shader = "shaders/" + shader
if os.path.isfile(shader+'.oso') != True or os.stat(shader+'.osl').st_mtime - os.stat(shader+'.oso').st_mtime > 0 :
print "compiling shader %s" %(shader)
try :
subprocess.check_call(["oslc", shader+".osl"])
except subprocess.CalledProcessError :
sys.exit("shader compilation failed")
def checkAndCompileOldShader(shader) :
shader = "shaders/" + shader
if os.path.isfile(shader+'.slo') != True or os.stat(shader+'.sl').st_mtime - os.stat(shader+'.slo').st_mtime > 0 :
print "compiling old shader %s" %(shader)
try :
subprocess.check_call(["shader", shader+".sl"])
except subprocess.CalledProcessError :
sys.exit("shader compilation failed")
# cube shape for the room
def Room(width,height,depth) :
w=width/2.0
h=height/2.0
d=depth/2.0
ri.ArchiveRecord(ri.COMMENT, 'Cube Generated by Cube Function')
ri.AttributeBegin()
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : [0.6, 0.6, 0.9],
"float roughness" : [0.5],
"float specular" : [0]
})
#rear
face=[-w, -h, d, -w, h, d, w, -h, d, w, h, d]
ri.Patch("bilinear",{'P':face})
#front
face=[-w, -h, -d, -w, h, -d, w, -h, -d, w, h, -d]
ri.Patch("bilinear",{'P':face})
#left
face=[-w, -h, -d, -w, h, -d, -w, -h, d, -w, h, d]
ri.Patch("bilinear",{'P':face})
#right
face=[w, -h, -d, w, h, -d, w, -h, d, w, h, d]
ri.Patch("bilinear",{'P':face})
#bottom
face=[w, -h, d, w, -h, -d, -w, -h, d, -w, -h, -d]
ri.Patch("bilinear",{'P':face})
#top
face=[w, h, d, w, h, -d, -w, h, d, -w, h, -d]
ri.Patch("bilinear",{'P':face})
ri.AttributeEnd()
ri.ArchiveRecord(ri.COMMENT, '--End of Cube Function--')
# hyperboloid shapes in the pin
def hyperboloid_wrapper(height, base_radius, top_radius):
ri.ArchiveRecord(ri.COMMENT, '--Hyperboloid Shape Generated by hyperboloid_wrapper Function--')
ri.TransformBegin()
ri.Rotate(-90, 1, 0, 0)
p_base = [base_radius, 0, 0]
p_top = [top_radius, 0, height]
ri.Hyperboloid(p_base, p_top, 360)
ri.TransformEnd()
ri.ArchiveRecord(ri.COMMENT, '--!End of hyperboloid_wrapper Function!--')
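# Illustrative note (not part of the original scene): hyperboloid_wrapper is
# used below to lathe the pin's sections, e.g. hyperboloid_wrapper(0.7, 0.2, 0.15)
# sweeps a full 360-degree hyperboloid from radius 0.2 at the base to radius
# 0.15 at height 0.7 up the pin's vertical axis.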
# model pin
def Pin(color):
ri.ArchiveRecord(ri.COMMENT, '--Pin Model Generated by Pin Function--')
#-------------------metal part-------------------#
# the pointy end
baseColorMetal = [0.8, 0.8, 0.8]
metallic = 1
roughnessTip = 0.4
roughnessStick = 0.1
ri.TransformBegin()
end_height = 0.35
metal_radius = 0.06
ri.Translate(0, end_height, 0)
ri.TransformBegin()
ri.AttributeBegin()
ri.Rotate(90, 1, 0, 0)
ri.Pattern("PxrBump","plasticBump",{
"string filename" : "textures/scratch.tx",
"float scale": 0.003,
"int invertT" : 0
})
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorMetal ,
"float metallic": metallic,
"float roughness": roughnessTip,
"float specular" : [0.5],
"reference normal bumpNormal" : ["plasticBump:resultN"]
})
ri.Cone(end_height, metal_radius, 360)
ri.AttributeEnd()
ri.TransformEnd()
#the metal stick
metal_height = 0.9
ri.TransformBegin()
ri.AttributeBegin()
ri.Rotate(-90, 1, 0, 0)
ri.Pattern("PxrOSL", "metal",{
"string shader" : "shaders/metal",
})
ri.Pattern("PxrOSL", "envmap", {
"string shader" : "shaders/envmap",
"color Cin" : baseColorMetal,
"string image" : "textures/office.tx"
})
ri.Bxdf( "PxrDisney","bxdf", {
"reference color baseColor" : ["envmap:Cout"],
"color baseColor" : baseColorMetal,
"float metallic": metallic,
"reference float roughness" : ["metal:opacity"],
"float specular" : [0.5]
})
ri.Cylinder(metal_radius, 0, metal_height, 360)
ri.AttributeEnd()
ri.TransformEnd()
#------------------!metal part!------------------#
#------------------plastic part------------------#
# base for bowly shaped part
baseColorPlastic = color
ri.TransformBegin()
ri.Translate(0, metal_height, 0)
ri.TransformBegin()
ri.AttributeBegin()
disk_radius = 0.466
ri.Attribute("trace", {
"displacements" : [1]
})
ri.Attribute("displacementbound", {
"sphere" : [1],
"coordinatesystem" : ["shader"]
})
ri.Pattern("PxrOSL","disk", {
"string shader" : "shaders/disk",
"float repetitions" : [2],
"float height" : [0.02]
})
ri.Displacement( "shaders/doDisplace", {
"reference float disp" : [ "disk:resultF" ],
"float atten" : [1]
})
ri.Pattern("PxrBump","plasticBump",{
"string filename" : "textures/scratch.tx",
"float scale": 0.003,
"int invertT" : 0
})
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorPlastic,
"float clearcoat" : [1],
"float roughness" : [0],
"reference normal bumpNormal" : ["plasticBump:resultN"]
})
hyperboloid_wrapper(0, metal_radius, disk_radius)
ri.AttributeEnd()
ri.TransformEnd()
# torus junction
ri.TransformBegin()
small_radius = 0.01
ri.Translate(0, small_radius, 0)
ri.TransformBegin()
ri.Rotate(-90, 1, 0, 0)
ri.AttributeBegin()
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorPlastic,
"float clearcoat" : [1],
"float roughness" : [0],
})
ri.Torus(disk_radius - small_radius, small_radius, -100, 110, 360)
ri.AttributeEnd()
ri.TransformEnd()
# bowly shaped part
ri.TransformBegin()
ri.AttributeBegin()
bowl_radius = 0.47
ri.Translate(0, -0.05, 0)
ri.Rotate(-90, 1, 0, 0)
y_max = 0.433
y_min = 0.05
ri.Attribute("trace", {
"displacements" : [1]
})
ri.Attribute("displacementbound", {
"sphere" : [1],
"coordinatesystem" : ["shader"]
})
ri.Pattern("PxrOSL","wave", {
"string shader" : "shaders/wave",
"float repetitions" : [2],
"float height" : [0.005]
})
ri.Displacement( "shaders/doDisplace", {
"reference float disp" : [ "wave:resultF" ],
"float atten" : [1]
})
ri.Pattern("PxrBump","plasticBump",{
"string filename" : "textures/scratch.tx",
"float scale": 0.003,
"int invertT" : 0
})
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorPlastic,
"reference normal bumpNormal" : ["plasticBump:resultN"],
"float clearcoat" : [1],
"float specular" : [1],
"float roughness" : [0]
})
ri.Sphere(bowl_radius, y_min, y_max, 360)
ri.AttributeEnd()
ri.TransformEnd()
# plastic main body
ri.TransformBegin()
ri.Translate(0, y_max - y_min - 0.05, 0)
ri.AttributeBegin()
body_height = 0.7
body_br = 0.2
body_tr = 0.15
ri.Attribute("trace", {
"displacements" : [1]
})
ri.Attribute("displacementbound", {
"sphere" : [1],
"coordinatesystem" : ["shader"]
})
ri.Pattern("PxrOSL","wave", {
"string shader" : "shaders/wave",
"int direction" : [1]
})
ri.Displacement( "shaders/doDisplace", {
"reference float disp" : [ "wave:resultF" ],
"float atten" : [1]
})
ri.Pattern("PxrBump","plasticBump",{
"string filename" : "textures/scratch.tx",
"float scale": 0.0005,
"int invertT" : 0
})
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorPlastic,
"reference normal bumpNormal" : ["plasticBump:resultN"],
"float clearcoat" : [1],
"float roughness" : [0]
})
hyperboloid_wrapper(body_height, body_br, body_tr)
ri.AttributeEnd()
# top base (tb)
ri.TransformBegin()
ri.Translate(0, body_height, 0)
ri.AttributeBegin()
tb_height = 0.08
tb_tr = 0.375
ri.Attribute("trace", {
"displacements" : [1]
})
ri.Attribute("displacementbound", {
"sphere" : [1],
"coordinatesystem" : ["shader"]
})
ri.Pattern("PxrOSL","wave", {
"string shader" : "shaders/wave",
"int direction" : [1]
})
ri.Displacement( "shaders/doDisplace", {
"reference float disp" : [ "wave:resultF" ],
"float atten" : [1]
})
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorPlastic,
"float clearcoat" : [1],
"float roughness" : [0]
})
hyperboloid_wrapper(tb_height, body_tr, tb_tr)
ri.AttributeEnd()
# top top (tt)
ri.TransformBegin()
ri.AttributeBegin()
ri.Translate(0, tb_height, 0)
tt_tr = 0.35
ri.Attribute("trace", {
"displacements" : [1]
})
ri.Attribute("displacementbound", {
"sphere" : [1],
"coordinatesystem" : ["shader"]
})
ri.Pattern("PxrOSL","wave", {
"string shader" : "shaders/wave",
"float repetitions" : [2],
"float height" : [0.003],
"int direction" : [0]
})
ri.Displacement( "shaders/doDisplace", {
"reference float disp" : [ "wave:resultF" ],
"float atten" : [1]
})
ri.Pattern("PxrBump","plasticBump",{
"string filename" : "textures/scratch.tx",
"float scale": 0.003,
"int invertT" : 0
})
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorPlastic,
"reference normal bumpNormal" : ["plasticBump:resultN"],
"float clearcoat" : [1],
"float roughness" : [0]
})
hyperboloid_wrapper(tb_height, tb_tr-0.0005, tt_tr)
ri.AttributeEnd()
# top cup (tc)
ri.TransformBegin()
ri.AttributeBegin()
ri.Translate(0, 2*tb_height, 0)
ri.Rotate(-90, 1, 0, 0)
tc_radius = tt_tr
ri.Attribute("trace", {
"displacements" : [1]
})
ri.Attribute("displacementbound", {
"sphere" : [1],
"coordinatesystem" : ["shader"]
})
ri.Pattern("PxrOSL","topdisk", {
"string shader" : "shaders/topdisk"
})
ri.Displacement( "shaders/doDisplace", {
"reference float disp" : [ "topdisk:resultF" ],
"float atten" : [1]
})
ri.Bxdf( "PxrDisney","bxdf", {
"color baseColor" : baseColorPlastic,
"float clearcoat" : [1],
"float roughness" : [0]
})
ri.Disk(0, tc_radius + 0.005, 360)
ri.AttributeEnd()
ri.TransformEnd()
ri.TransformEnd()
ri.TransformEnd()
ri.TransformEnd()
ri.TransformEnd()
ri.TransformEnd()
ri.TransformEnd()
#-----------------!plastic part!-----------------#
ri.ArchiveRecord(ri.COMMENT, '--!End of Pin Function!--')
def Table():
ri.ArchiveRecord(ri.COMMENT, '--Table Model Generated by Table Function--')
ri.AttributeBegin()
face = [12, 0, 12, 12, 0, -12, -12, 0, 12, -12, 0, -12]
ri.Attribute("trace", {
"displacements" : [1]
})
ri.Attribute("displacementbound", {
"sphere" : [1],
"coordinatesystem" : ["shader"]
})
ri.Pattern("PxrOSL","paper", {
"string shader" : "shaders/paper"
})
ri.Displacement( "shaders/doDisplace", {
"reference float disp" : [ "paper:resultF" ],
"float atten" : [1]
})
ri.Pattern("PxrTexture", "myTexture", {"string filename" : ["textures/ink_paper.tx"],
"int invertT" : [ 0 ]
})
ri.Bxdf( "PxrDisney","bxdf", {
"reference color baseColor" : ["myTexture:resultRGB"],
"float roughness" : [0.5],
"float specular" : [0.1]
})
ri.Patch("bilinear",{'P':face})
ri.AttributeEnd()
ri.ArchiveRecord(ri.COMMENT, '--!End of Table Function!--')
###-------------------------End of Function Section-------------------------###
# check and compile shaders
checkAndCompileShader('wave')
checkAndCompileShader('disk')
checkAndCompileShader('topdisk')
checkAndCompileShader('paper')
checkAndCompileShader('envmap')
checkAndCompileOldShader('doDisplace')
# create an instance for the RenderMan interface
ri = prman.Ri()
# make the generated RIB file nicely indented
ri.Option("rib", {"string asciistyle": "indented"})
filename = "scene.rib"
# begin of RIB archive
ri.Begin(filename)
ri.ArchiveRecord(ri.COMMENT, 'File ' +filename)
ri.ArchiveRecord(ri.COMMENT, "Created by " + getpass.getuser())
ri.ArchiveRecord(ri.COMMENT, "Creation Date: " +time.ctime(time.time()))
# export image
ri.Display("scene.exr", "it", "rgba")
ri.Format(1280,720,1)
# Raytrace hider with incremental sampling (256-512 samples) to reduce noise;
# keep the pixel variance low for a cleaner look.
ri.Hider("raytrace" ,{"int incremental" :[1], "int maxsamples" : 512, "int minsamples" : 256 })
ri.PixelVariance (0.01)
ri.ShadingRate(10)
# ri.Exposure(1, 2.2)
#Path tracer for final lighting and shading; only the last Integrator call
#takes effect, so the alternatives are left commented out for reference.
# ri.Integrator ("PxrVCM" ,"integrator")
# ri.Integrator ("PxrDirectLighting" ,"integrator")
ri.Integrator ("PxrPathTracer" ,"integrator")
# now set the projection to perspective
ri.Projection(ri.PERSPECTIVE,{ri.FOV:30} )
#Move our camera into place.
ri.Rotate(-30,1,0,0)
ri.Translate(0,-4,5)
# camera settings
# fstop, focallength, focaldistance
# first image (superseded by the second call below)
# ri.DepthOfField(5.6, 0.9, 6.8)
# second image
ri.DepthOfField(5.6, 0.6, 7.5)
# Begin The World
ri.WorldBegin()
#-------------------Lights--------------------
#Add a few lights to brighten up the scene.
ri.AttributeBegin()
ri.Declare("areaLight" ,"string")
ri.AreaLightSource( "PxrStdAreaLight", {ri.HANDLEID:"areaLight",
"float exposure" : [9.5]
})
ri.Bxdf( "PxrDisney","bxdf", {
"color emitColor" : [ 1,1,1]
})
#Light 1
ri.TransformBegin()
ri.Translate(10, 8,4)
ri.Scale(4,4,4)
ri.Geometry("spherelight")
ri.TransformEnd()
#Light 2
ri.TransformBegin()
ri.Translate(-10, 8,4)
ri.Scale(4,4,4)
ri.Geometry("spherelight")
ri.TransformEnd()
ri.TransformBegin()
#Light 3
ri.Translate(10,8,-8)
ri.Scale(4,4,4)
ri.Geometry("spherelight")
ri.TransformEnd()
ri.AttributeEnd()
#-------------------!Lights--------------------
# the room
Room(20, 20, 25)
# the groundplane (paper-textured table surface)
ri.TransformBegin()
ri.Rotate(-90, 0, 1, 0)
Table()
ri.TransformEnd()
# first image
# # create and move the pins
# ri.TransformBegin()
# ri.Translate(0.1, 0, -0.2)
# ri.Rotate(-10, 0, 1, 0)
# # first pin
# ri.TransformBegin()
# ri.Translate(-1.2, 0.6, 0.7)
# ri.Rotate(-96, 0, 0, 1)
# ri.Rotate(30, 1, 0, 0)
# ri.Rotate(-20, 0, 1, 0)
# Pin([0.2,0.2,0.8])
# ri.TransformEnd()
# # second pin
# ri.TransformBegin()
# ri.Translate(0.7, 1.72, 0.98)
# ri.Rotate(156, 0, 0, 1)
# ri.Rotate(50, 1, 0, 0)
# Pin([0.9, 0.2, 0.2])
# ri.TransformEnd()
# ri.TransformEnd()
#second image
# create and move the pins
ri.TransformBegin()
ri.Translate(-0.1, 0, -0.2)
# first pin
ri.TransformBegin()
ri.Translate(-1.9, 0.6, 0.7)
ri.Rotate(-96, 0, 0, 1)
ri.Rotate(30, 1, 0, 0)
ri.Rotate(-30, 0, 1, 0)
Pin([0.9, 0.2, 0.2])
ri.TransformEnd()
# second pin
ri.TransformBegin()
ri.Translate(0.7, 0.55, 3.5)
ri.Rotate(-94.5, 1, 0, 0)
ri.Rotate(-15, 0, 0, 1)
ri.Rotate(-40, 0, 1, 0)
Pin([0.2, 0.8, 0.3])
ri.TransformEnd()
ri.TransformEnd()
# end of the world
ri.WorldEnd()
# end of rib file
ri.End()
| bsd-2-clause |
webu/django-categories | doc_src/code_examples/custom_categories3.py | 13 | 1144 | class Category(CategoryBase):
thumbnail = models.FileField(
upload_to=THUMBNAIL_UPLOAD_PATH,
null=True, blank=True,
storage=STORAGE(),)
thumbnail_width = models.IntegerField(blank=True, null=True)
thumbnail_height = models.IntegerField(blank=True, null=True)
order = models.IntegerField(default=0)
alternate_title = models.CharField(
blank=True,
default="",
max_length=100,
help_text="An alternative title to use on pages with this category.")
alternate_url = models.CharField(
blank=True,
max_length=200,
help_text="An alternative URL to use instead of the one derived from "
"the category hierarchy.")
description = models.TextField(blank=True, null=True)
meta_keywords = models.CharField(
blank=True,
default="",
max_length=255,
help_text="Comma-separated keywords for search engines.")
meta_extra = models.TextField(
blank=True,
default="",
help_text="(Advanced) Any additional HTML to be placed verbatim "
"in the <head>") | apache-2.0 |
tsdmgz/ansible | lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py | 27 | 5751 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
ca_certs:
description:
- Ca certificates in certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Enum options - ssl_certificate_finished, ssl_certificate_pending.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Enum options - ssl_certificate_type_virtualservice, ssl_certificate_type_system, ssl_certificate_type_ca.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
'''
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
ca_certs=dict(type='list',),
certificate=dict(type='dict', required=True),
certificate_management_profile_ref=dict(type='str',),
created_by=dict(type='str',),
dynamic_params=dict(type='list',),
enckey_base64=dict(type='str',),
enckey_name=dict(type='str',),
hardwaresecuritymodulegroup_ref=dict(type='str',),
key=dict(type='str', no_log=True,),
key_params=dict(type='dict',),
name=dict(type='str', required=True),
status=dict(type='str',),
tenant_ref=dict(type='str',),
type=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'sslkeyandcertificate',
set(['key']))
if __name__ == '__main__':
main()
| gpl-3.0 |
jherve/ardupilot | Tools/LogAnalyzer/tests/TestPitchRollCoupling.py | 267 | 5809 | from LogAnalyzer import Test,TestResult
import DataflashLog
import collections
class TestPitchRollCoupling(Test):
'''test for divergence between input and output pitch/roll, i.e. mechanical failure or bad PID tuning'''
# TODO: currently we're only checking for roll/pitch outside of max lean angle, will come back later to analyze roll/pitch in versus out values
def __init__(self):
Test.__init__(self)
self.name = "Pitch/Roll"
self.enable = True # TEMP
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
if logdata.vehicleType != "ArduCopter":
self.result.status = TestResult.StatusType.NA
return
if not "ATT" in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No ATT log data"
return
# figure out where each mode begins and ends, so we can treat auto and manual modes differently and ignore acro/tune modes
autoModes = ["RTL","AUTO","LAND","LOITER","GUIDED","CIRCLE","OF_LOITER","HYBRID"] # use NTUN DRol+DPit
manualModes = ["STABILIZE","DRIFT","ALTHOLD","ALT_HOLD","POSHOLD"] # use CTUN RollIn/DesRoll + PitchIn/DesPitch
ignoreModes = ["ACRO","SPORT","FLIP","AUTOTUNE",""] # ignore data from these modes
autoSegments = [] # list of (startLine,endLine) pairs
manualSegments = [] # list of (startLine,endLine) pairs
orderedModes = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
isAuto = False # we always start in a manual control mode
prevLine = 0
mode = ""
for line,modepair in orderedModes.iteritems():
mode = modepair[0].upper()
if prevLine == 0:
prevLine = line
if mode in autoModes:
if not isAuto:
manualSegments.append((prevLine,line-1))
prevLine = line
isAuto = True
elif mode in manualModes:
if isAuto:
autoSegments.append((prevLine,line-1))
prevLine = line
isAuto = False
elif mode in ignoreModes:
if isAuto:
autoSegments.append((prevLine,line-1))
else:
manualSegments.append((prevLine,line-1))
prevLine = 0
else:
raise Exception("Unknown mode in TestPitchRollCoupling: %s" % mode)
# and handle the last segment, which doesn't have an ending
if mode in autoModes:
autoSegments.append((prevLine,logdata.lineCount))
elif mode in manualModes:
manualSegments.append((prevLine,logdata.lineCount))
# figure out max lean angle, the ANGLE_MAX param was added in AC3.1
maxLeanAngle = 45.0
if "ANGLE_MAX" in logdata.parameters:
maxLeanAngle = logdata.parameters["ANGLE_MAX"] / 100.0
maxLeanAngleBuffer = 10 # allow a buffer margin
# ignore anything below this altitude, to discard any data while not flying
minAltThreshold = 2.0
# look through manual+auto flight segments
# TODO: filter to ignore single points outside range?
(maxRoll, maxRollLine) = (0.0, 0)
(maxPitch, maxPitchLine) = (0.0, 0)
for (startLine,endLine) in manualSegments+autoSegments:
# quick up-front test, only fallover into more complex line-by-line check if max()>threshold
rollSeg = logdata.channels["ATT"]["Roll"].getSegment(startLine,endLine)
pitchSeg = logdata.channels["ATT"]["Pitch"].getSegment(startLine,endLine)
if not rollSeg.dictData and not pitchSeg.dictData:
continue
# check max roll+pitch for any time where relative altitude is above minAltThreshold
roll = max(abs(rollSeg.min()), abs(rollSeg.max()))
pitch = max(abs(pitchSeg.min()), abs(pitchSeg.max()))
if (roll>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll)) or (pitch>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch)):
lit = DataflashLog.LogIterator(logdata, startLine)
assert(lit.currentLine == startLine)
while lit.currentLine <= endLine:
relativeAlt = lit["CTUN"]["BarAlt"]
if relativeAlt > minAltThreshold:
roll = lit["ATT"]["Roll"]
pitch = lit["ATT"]["Pitch"]
if abs(roll)>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll):
maxRoll = roll
maxRollLine = lit.currentLine
if abs(pitch)>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch):
maxPitch = pitch
maxPitchLine = lit.currentLine
lit.next()
# check for breaking max lean angles
if maxRoll and abs(maxRoll)>abs(maxPitch):
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Roll (%.2f, line %d) > maximum lean angle (%.2f)" % (maxRoll, maxRollLine, maxLeanAngle)
return
if maxPitch:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Pitch (%.2f, line %d) > maximum lean angle (%.2f)" % (maxPitch, maxPitchLine, maxLeanAngle)
return
# TODO: use numpy/scipy to check Roll+RollIn curves for fitness (ignore where we're not airborne)
# ...
| gpl-3.0 |
krischer/LASIF | lasif/tests/components/test_stations_component.py | 1 | 9006 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import inspect
import mock
import obspy
import os
import pytest
from lasif import LASIFNotFoundError
from lasif.components.stations import StationsComponent
from lasif.components.communicator import Communicator
from .test_project_component import comm as project_comm # NOQA
@pytest.fixture
def comm(tmpdir):
tmpdir = str(tmpdir)
# Test data directory
data_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))), "data")
comm = Communicator()
proj_mock = mock.MagicMock()
proj_mock.read_only_caches = False
proj_mock.paths = {"root": data_dir}
comm.register("project", proj_mock)
StationsComponent(
stationxml_folder=os.path.join(data_dir, "station_files",
"stationxml"),
seed_folder=os.path.join(data_dir, "station_files", "seed"),
resp_folder=os.path.join(data_dir, "station_files", "resp"),
cache_folder=tmpdir,
communicator=comm,
component_name="stations")
comm.cache_dir = tmpdir
return comm
def test_project_has_channel(project_comm):
    """
    Tests the has_channel() method against the example project's stations.
    """
    comm = project_comm
    assert comm.stations.has_channel(
        "HL.ARG..BHZ", obspy.UTCDateTime(2010, 3, 24, 14, 30))
    assert comm.stations.has_channel(
        "HT.SIGR..HHZ", obspy.UTCDateTime(2010, 3, 24, 14, 30))
    assert comm.stations.has_channel(
        "KO.KULA..BHZ", obspy.UTCDateTime(2010, 3, 24, 14, 30))
    assert comm.stations.has_channel(
        "KO.RSDY..BHZ", obspy.UTCDateTime(2010, 3, 24, 14, 30))
    assert not comm.stations.has_channel(
        "HL.ARG..BHZ", obspy.UTCDateTime(1970, 3, 24, 14, 30))
    assert not comm.stations.has_channel(
        "HT.SIGR..HHZ", obspy.UTCDateTime(1970, 3, 24, 14, 30))
    assert not comm.stations.has_channel(
        "KO.KULA..BHZ", obspy.UTCDateTime(1970, 3, 24, 14, 30))
    assert not comm.stations.has_channel(
        "KO.RSDY..BHZ", obspy.UTCDateTime(1970, 3, 24, 14, 30))
def test_station_cache_update(comm):
assert len(os.listdir(comm.cache_dir)) == 0
comm.stations.force_cache_update()
assert os.listdir(comm.cache_dir) == ["station_cache.sqlite"]
# Make sure all files are in there.
filenames = list(set([i["filename"]
for i in comm.stations.get_all_channels()]))
assert sorted(os.path.basename(i) for i in filenames) == \
sorted(
["RESP.AF.DODT..BHE", "RESP.G.FDF.00.BHE", "RESP.G.FDF.00.BHN",
"RESP.G.FDF.00.BHZ", "dataless.BW_FURT", "dataless.IU_PAB",
"IRIS_single_channel_with_response.xml"])
def test_autocreation_of_cache(comm):
# Deleting the cache file and initializing a new stations object will
# create the station cache if it is accessed.
assert len(os.listdir(comm.cache_dir)) == 0
filenames = list(set([i["filename"]
for i in comm.stations.get_all_channels()]))
assert sorted(os.path.basename(i) for i in filenames) == \
sorted(
["RESP.AF.DODT..BHE", "RESP.G.FDF.00.BHE", "RESP.G.FDF.00.BHN",
"RESP.G.FDF.00.BHZ", "dataless.BW_FURT", "dataless.IU_PAB",
"IRIS_single_channel_with_response.xml"])
assert os.listdir(comm.cache_dir) == ["station_cache.sqlite"]
def test_has_channel(comm):
all_channels = comm.stations.get_all_channels()
for channel in all_channels:
# Works for timestamps and UTCDateTime objects.
assert comm.stations.has_channel(channel["channel_id"],
channel["start_date"])
assert comm.stations.has_channel(
channel["channel_id"], obspy.UTCDateTime(channel["start_date"]))
# Slightly after the starttime should still work.
assert comm.stations.has_channel(channel["channel_id"],
channel["start_date"] + 3600)
assert comm.stations.has_channel(
channel["channel_id"],
obspy.UTCDateTime(channel["start_date"] + 3600))
# Slightly before not.
assert not comm.stations.has_channel(channel["channel_id"],
channel["start_date"] - 3600)
assert not comm.stations.has_channel(
channel["channel_id"],
obspy.UTCDateTime(channel["start_date"] - 3600))
# For those that have an endtime, do the same.
if channel["end_date"]:
assert comm.stations.has_channel(channel["channel_id"],
channel["end_date"])
assert comm.stations.has_channel(
channel["channel_id"],
obspy.UTCDateTime(channel["end_date"]))
# Slightly before.
assert comm.stations.has_channel(channel["channel_id"],
channel["end_date"] - 3600)
assert comm.stations.has_channel(
channel["channel_id"],
obspy.UTCDateTime(channel["end_date"] - 3600))
# But not slightly after.
assert not comm.stations.has_channel(channel["channel_id"],
channel["end_date"] + 3600)
assert not comm.stations.has_channel(
channel["channel_id"],
obspy.UTCDateTime(channel["end_date"] + 3600))
else:
# For those that do not have an endtime, a time very far in the
# future should work just fine.
assert comm.stations.has_channel(channel["channel_id"],
obspy.UTCDateTime(2030, 1, 1))
def test_get_station_filename(comm):
all_channels = comm.stations.get_all_channels()
for channel in all_channels:
# Should work for timestamps and UTCDateTime objects.
assert channel["filename"] == comm.stations.get_channel_filename(
channel["channel_id"], channel["start_date"] + 3600)
assert channel["filename"] == comm.stations.get_channel_filename(
channel["channel_id"],
obspy.UTCDateTime(channel["start_date"] + 3600))
with pytest.raises(LASIFNotFoundError):
comm.stations.get_channel_filename(
channel["channel_id"], channel["start_date"] - 3600)
def test_get_details_for_filename(comm):
all_channels = comm.stations.get_all_channels()
for channel in all_channels:
assert channel in \
comm.stations.get_details_for_filename(channel["filename"])
def test_all_coordinates_at_time(comm):
# There is only one stations that start that early.
coords = comm.stations.get_all_channels_at_time(920000000)
assert coords == \
{'IU.PAB.00.BHE':
{'latitude': 39.5446, 'elevation_in_m': 950.0,
'local_depth_in_m': 0.0, 'longitude': -4.349899}}
# Also works with a UTCDateTime object.
coords = comm.stations.get_all_channels_at_time(
obspy.UTCDateTime(920000000))
assert coords == \
{'IU.PAB.00.BHE':
{'latitude': 39.5446, 'elevation_in_m': 950.0,
'local_depth_in_m': 0.0, 'longitude': -4.349899}}
# Most channels have no set endtime or an endtime very far in the
# future.
channels = comm.stations.get_all_channels_at_time(
obspy.UTCDateTime(2030, 1, 1))
assert sorted(channels.keys()) == sorted(
["G.FDF.00.BHZ", "G.FDF.00.BHN", "G.FDF.00.BHE", "AF.DODT..BHE",
"BW.FURT..EHE", "BW.FURT..EHN", "BW.FURT..EHZ", "IU.ANMO.10.BHZ"])
def test_station_filename_generator(project_comm):
"""
Make sure existing stations are not overwritten by creating unique new
station filenames. This is used when downloading new station files.
"""
comm = project_comm
new_seed_filename = comm.stations.get_station_filename(
"HL", "ARG", "", "BHZ", "datalessSEED")
existing_seed_filename = glob.glob(os.path.join(
comm.project.paths["dataless_seed"], "dataless.HL_*"))[0]
assert os.path.exists(existing_seed_filename)
assert existing_seed_filename != new_seed_filename
assert os.path.dirname(existing_seed_filename) == \
os.path.dirname(new_seed_filename)
assert os.path.dirname(new_seed_filename) == \
comm.project.paths["dataless_seed"]
# Test RESP file name generation.
resp_filename_1 = comm.stations.get_station_filename(
"A", "B", "C", "D", "RESP")
assert not os.path.exists(resp_filename_1)
assert os.path.dirname(resp_filename_1) == comm.project.paths["resp"]
with open(resp_filename_1, "wt") as fh:
fh.write("blub")
assert os.path.exists(resp_filename_1)
resp_filename_2 = comm.stations.get_station_filename(
"A", "B", "C", "D", "RESP")
assert resp_filename_1 != resp_filename_2
assert os.path.dirname(resp_filename_2) == comm.project.paths["resp"]
| gpl-3.0 |
fraricci/pymatgen | pymatgen/io/prismatic.py | 9 | 1367 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Write Prismatic (http://prism-em.com/) input files.
"""
class Prismatic:
"""
Class to write Prismatic (http://prism-em.com/) input files.
This is designed for STEM image simulation.
"""
def __init__(self, structure, comment="Generated by pymatgen"):
"""
Args:
structure: pymatgen Structure
comment (str): comment
"""
self.structure = structure
self.comment = comment
def to_string(self):
"""
Returns: Prismatic XYZ file. This is similar to XYZ format
but has specific requirements for extra fields, headers, etc.
"""
l = self.structure.lattice
lines = [self.comment, "{} {} {}".format(l.a, l.b, l.c)]
for site in self.structure:
for sp, occu in site.species.items():
lines.append(
"{} {} {} {} {} {}".format(
sp.Z,
site.coords[0],
site.coords[1],
site.coords[2],
occu,
site.properties.get("thermal_sigma", 0),
)
)
lines.append("-1")
return "\n".join(lines)
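# --- Editor's illustrative sketch (not part of pymatgen) ------------------
# A hedged usage example for the class above.  It assumes pymatgen's
# Structure/Lattice API under the ``pymatgen.core`` import path; the imports
# are done inside the function so the sketch has no effect at import time.
def _prismatic_usage_example():
    from pymatgen.core import Lattice, Structure  # assumed import path
    # Simple-cubic polonium cell, purely for demonstration.
    s = Structure(Lattice.cubic(3.36), ["Po"], [[0, 0, 0]])
    # The string contains the comment line, the "a b c" cell line, one line
    # per site ("Z x y z occupancy thermal_sigma"), and a terminating "-1".
    return Prismatic(s, comment="Po SC cell").to_string()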
| mit |
Zhongqilong/mykbengineer | kbe/src/lib/python/Modules/_decimal/libmpdec/literature/fnt.py | 50 | 7169 | #
# Copyright (c) 2008-2016 Stefan Krah. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
######################################################################
# This file lists and checks some of the constants and limits used #
# in libmpdec's Number Theoretic Transform. At the end of the file #
# there is an example function for the plain DFT transform. #
######################################################################
#
# Number theoretic transforms are done in subfields of F(p). P[i]
# are the primes, D[i] = P[i] - 1 are highly composite and w[i]
# are the respective primitive roots of F(p).
#
# The strategy is to convolute two coefficients modulo all three
# primes, then use the Chinese Remainder Theorem on the three
# result arrays to recover the result in the usual base RADIX
# form.
#
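# --- Editor's illustrative sketch (not part of the original file) --------
# The paragraph above describes recovering a convolution word from its
# residues modulo the three 64-bit primes via the Chinese Remainder
# Theorem.  The self-contained helper below demonstrates that recovery
# (Garner's algorithm); the primes are written out as literals so it does
# not depend on anything defined later in this file.
def _crt3_recovery_example():
    p = [2**64-2**32+1, 2**64-2**34+1, 2**64-2**40+1]
    x = 123456789123456789123456789       # any value < p[0]*p[1]*p[2]
    residues = [x % q for q in p]         # what the three NTTs would deliver
    m, r = p[0], residues[0]
    for q, a in zip(p[1:], residues[1:]):
        t = ((a - r) * pow(m, q - 2, q)) % q   # m**-1 mod q via Fermat
        r, m = r + m * t, m * q
    assert r == x
    return r
#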
# ======================================================================
# Primitive roots
# ======================================================================
#
# Verify primitive roots:
#
# For a prime field, r is a primitive root if and only if for all prime
# factors f of p-1, r**((p-1)/f) =/= 1 (mod p).
#
def prod(F, E):
"""Check that the factorization of P-1 is correct. F is the list of
factors of P-1, E lists the number of occurrences of each factor."""
x = 1
for y, z in zip(F, E):
x *= y**z
return x
def is_primitive_root(r, p, factors, exponents):
"""Check if r is a primitive root of F(p)."""
if p != prod(factors, exponents) + 1:
return False
for f in factors:
q, control = divmod(p-1, f)
if control != 0:
return False
if pow(r, q, p) == 1:
return False
return True
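# Editor's note (illustrative, not part of the original file): a tiny sanity
# check of the criterion above in F(7), where 7-1 = 2 * 3.  3 generates the
# full multiplicative group, while 2 does not because 2**3 = 1 (mod 7).
assert is_primitive_root(3, 7, (2, 3), (1, 1))
assert not is_primitive_root(2, 7, (2, 3), (1, 1))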
# =================================================================
# Constants and limits for the 64-bit version
# =================================================================
RADIX = 10**19
# Primes P1, P2 and P3:
P = [2**64-2**32+1, 2**64-2**34+1, 2**64-2**40+1]
# P-1, highly composite. The transform length d is variable and
# must divide D = P-1. Since all D are divisible by 3 * 2**32,
# transform lengths can be 2**n or 3 * 2**n (where n <= 32).
D = [2**32 * 3 * (5 * 17 * 257 * 65537),
2**34 * 3**2 * (7 * 11 * 31 * 151 * 331),
2**40 * 3**2 * (5 * 7 * 13 * 17 * 241)]
# Prime factors of P-1 and their exponents:
F = [(2,3,5,17,257,65537), (2,3,7,11,31,151,331), (2,3,5,7,13,17,241)]
E = [(32,1,1,1,1,1), (34,2,1,1,1,1,1), (40,2,1,1,1,1,1)]
# Maximum transform length for 2**n. Above that only 3 * 2**31
# or 3 * 2**32 are possible.
MPD_MAXTRANSFORM_2N = 2**32
# Limits in the terminology of Pollard's paper:
m2 = (MPD_MAXTRANSFORM_2N * 3) // 2 # Maximum length of the smaller array.
M1 = M2 = RADIX-1 # Maximum value per single word.
L = m2 * M1 * M2
assert P[0] * P[1] * P[2] > 2 * L   # CRT modulus (~2**192) comfortably exceeds 2*L (~2**160)
# Primitive roots of F(P1), F(P2) and F(P3):
w = [7, 10, 19]
# The primitive roots are correct:
for i in range(3):
if not is_primitive_root(w[i], P[i], F[i], E[i]):
print("FAIL")
# =================================================================
# Constants and limits for the 32-bit version
# =================================================================
RADIX = 10**9
# Primes P1, P2 and P3:
P = [2113929217, 2013265921, 1811939329]
# P-1, highly composite. All D = P-1 are divisible by 3 * 2**25,
# allowing for transform lengths up to 3 * 2**25 words.
D = [2**25 * 3**2 * 7,
2**27 * 3 * 5,
2**26 * 3**3]
# Prime factors of P-1 and their exponents:
F = [(2,3,7), (2,3,5), (2,3)]
E = [(25,2,1), (27,1,1), (26,3)]
# Maximum transform length for 2**n. Above that only 3 * 2**24 or
# 3 * 2**25 are possible.
MPD_MAXTRANSFORM_2N = 2**25
# Limits in the terminology of Pollard's paper:
m2 = (MPD_MAXTRANSFORM_2N * 3) // 2 # Maximum length of the smaller array.
M1 = M2 = RADIX-1 # Maximum value per single word.
L = m2 * M1 * M2
assert P[0] * P[1] * P[2] > 2 * L   # ~2**93 vs ~2**86, so the CRT recovery is exact
# Primitive roots of F(P1), F(P2) and F(P3):
w = [5, 31, 13]
# The primitive roots are correct:
for i in range(3):
if not is_primitive_root(w[i], P[i], F[i], E[i]):
print("FAIL")
# ======================================================================
# Example transform using a single prime
# ======================================================================
def ntt(lst, dir):
"""Perform a transform on the elements of lst. len(lst) must
be 2**n or 3 * 2**n, where n <= 25. This is the slow DFT."""
p = 2113929217 # prime
d = len(lst) # transform length
d_prime = pow(d, (p-2), p) # inverse of d
xi = (p-1)//d
w = 5 # primitive root of F(p)
r = pow(w, xi, p) # primitive root of the subfield
r_prime = pow(w, (p-1-xi), p) # inverse of r
if dir == 1: # forward transform
a = lst # input array
A = [0] * d # transformed values
for i in range(d):
s = 0
for j in range(d):
s += a[j] * pow(r, i*j, p)
A[i] = s % p
return A
elif dir == -1: # backward transform
A = lst # input array
a = [0] * d # transformed values
for j in range(d):
s = 0
for i in range(d):
s += A[i] * pow(r_prime, i*j, p)
a[j] = (d_prime * s) % p
return a
def ntt_convolute(a, b):
"""convolute arrays a and b."""
assert(len(a) == len(b))
x = ntt(a, 1)
y = ntt(b, 1)
for i in range(len(a)):
y[i] = y[i] * x[i]
r = ntt(y, -1)
return r
# Example: Two arrays representing 21 and 81 in little-endian:
a = [1, 2, 0, 0]
b = [1, 8, 0, 0]
assert(ntt_convolute(a, b) == [1, 10, 16, 0])
assert(21 * 81 == (1*10**0 + 10*10**1 + 16*10**2 + 0*10**3))
| lgpl-3.0 |
muntasirsyed/intellij-community | python/testData/MockSdk3.2/Lib/_abcoll.py | 112 | 15499 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
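# Editor's note (illustrative addition): as the module docstring says, these
# names are re-exported by the ``collections`` package, so user code should
# spell imports as, e.g.:
#     from collections import Sequence, MutableMapping
# rather than importing from _abcoll directly.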
### collection related types which are not exposed through builtin ###
## iterators ##
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
dict_proxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
return NotImplemented
class Iterable(metaclass=ABCMeta):
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if any("__iter__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Iterator(Iterable):
@abstractmethod
def __next__(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if (any("__next__" in B.__dict__ for B in C.__mro__) and
any("__iter__" in B.__dict__ for B in C.__mro__)):
return True
return NotImplemented
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if any("__len__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Container(metaclass=ABCMeta):
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if any("__contains__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
class Callable(metaclass=ABCMeta):
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if any("__call__" in B.__dict__ for B in C.__mro__):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxsize
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
Set.register(frozenset)
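# --- Editor's illustrative sketch (not part of the stdlib module) ---------
# The Set docstring above notes that a concrete subclass only has to supply
# __contains__, __iter__ and __len__; the mixin then provides the set
# operators.  A minimal list-backed example:
class _ListBackedSetExample(Set):
    def __init__(self, iterable=()):
        self._items = []
        for value in iterable:
            if value not in self._items:
                self._items.append(value)
    def __contains__(self, value):
        return value in self._items
    def __iter__(self):
        return iter(self._items)
    def __len__(self):
        return len(self._items)
# With only those three methods, the inherited __and__ already works:
#     _ListBackedSetExample([1, 2]) & _ListBackedSetExample([2, 3])
# yields a set-like object containing just 2.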
class MutableSet(Set):
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def keys(self):
return KeysView(self)
def items(self):
return ItemsView(self)
def values(self):
return ValuesView(self)
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
ItemsView.register(dict_items)
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
ValuesView.register(dict_values)
class MutableMapping(Mapping):
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
class ByteString(Sequence):
"""This unifies bytes and bytearray.
XXX Should add all their methods.
"""
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
MutableSequence.register(bytearray) # Multiply inheriting, see ByteString
| apache-2.0 |
edesiocs/namebench | nb_third_party/dns/resolver.py | 215 | 28920 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS stub resolver.
@var default_resolver: The default resolver object
@type default_resolver: dns.resolver.Resolver object"""
import socket
import sys
import time
import dns.exception
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdataclass
import dns.rdatatype
if sys.platform == 'win32':
import _winreg
class NXDOMAIN(dns.exception.DNSException):
"""The query name does not exist."""
pass
# The definition of the Timeout exception has moved from here to the
# dns.exception module. We keep dns.resolver.Timeout defined for
# backwards compatibility.
Timeout = dns.exception.Timeout
class NoAnswer(dns.exception.DNSException):
"""The response did not contain an answer to the question."""
pass
class NoNameservers(dns.exception.DNSException):
"""No non-broken nameservers are available to answer the query."""
pass
class NotAbsolute(dns.exception.DNSException):
"""Raised if an absolute domain name is required but a relative name
was provided."""
pass
class NoRootSOA(dns.exception.DNSException):
"""Raised if for some reason there is no SOA at the root name.
This should never happen!"""
pass
class Answer(object):
"""DNS stub resolver answer
Instances of this class bundle up the result of a successful DNS
resolution.
For convenience, the answer object implements much of the sequence
protocol, forwarding to its rrset. E.g. "for a in answer" is
equivalent to "for a in answer.rrset", "answer[i]" is equivalent
to "answer.rrset[i]", and "answer[i:j]" is equivalent to
"answer.rrset[i:j]".
Note that CNAMEs or DNAMEs in the response may mean that answer
node's name might not be the query name.
@ivar qname: The query name
@type qname: dns.name.Name object
@ivar rdtype: The query type
@type rdtype: int
@ivar rdclass: The query class
@type rdclass: int
@ivar response: The response message
@type response: dns.message.Message object
@ivar rrset: The answer
@type rrset: dns.rrset.RRset object
@ivar expiration: The time when the answer expires
@type expiration: float (seconds since the epoch)
"""
def __init__(self, qname, rdtype, rdclass, response):
self.qname = qname
self.rdtype = rdtype
self.rdclass = rdclass
self.response = response
min_ttl = -1
rrset = None
for count in xrange(0, 15):
try:
rrset = response.find_rrset(response.answer, qname,
rdclass, rdtype)
if min_ttl == -1 or rrset.ttl < min_ttl:
min_ttl = rrset.ttl
break
except KeyError:
if rdtype != dns.rdatatype.CNAME:
try:
crrset = response.find_rrset(response.answer,
qname,
rdclass,
dns.rdatatype.CNAME)
if min_ttl == -1 or crrset.ttl < min_ttl:
min_ttl = crrset.ttl
for rd in crrset:
qname = rd.target
break
continue
except KeyError:
raise NoAnswer
raise NoAnswer
if rrset is None:
raise NoAnswer
self.rrset = rrset
self.expiration = time.time() + min_ttl
def __getattr__(self, attr):
if attr == 'name':
return self.rrset.name
elif attr == 'ttl':
return self.rrset.ttl
elif attr == 'covers':
return self.rrset.covers
elif attr == 'rdclass':
return self.rrset.rdclass
elif attr == 'rdtype':
return self.rrset.rdtype
else:
raise AttributeError(attr)
def __len__(self):
return len(self.rrset)
def __iter__(self):
return iter(self.rrset)
def __getitem__(self, i):
return self.rrset[i]
def __delitem__(self, i):
del self.rrset[i]
def __getslice__(self, i, j):
return self.rrset[i:j]
def __delslice__(self, i, j):
del self.rrset[i:j]
class Cache(object):
"""Simple DNS answer cache.
@ivar data: A dictionary of cached data
@type data: dict
@ivar cleaning_interval: The number of seconds between cleanings. The
default is 300 (5 minutes).
@type cleaning_interval: float
@ivar next_cleaning: The time the cache should next be cleaned (in seconds
since the epoch.)
@type next_cleaning: float
"""
def __init__(self, cleaning_interval=300.0):
"""Initialize a DNS cache.
@param cleaning_interval: the number of seconds between periodic
cleanings. The default is 300.0
@type cleaning_interval: float.
"""
self.data = {}
self.cleaning_interval = cleaning_interval
self.next_cleaning = time.time() + self.cleaning_interval
def maybe_clean(self):
"""Clean the cache if it's time to do so."""
now = time.time()
if self.next_cleaning <= now:
keys_to_delete = []
for (k, v) in self.data.iteritems():
if v.expiration <= now:
keys_to_delete.append(k)
for k in keys_to_delete:
del self.data[k]
now = time.time()
self.next_cleaning = now + self.cleaning_interval
def get(self, key):
"""Get the answer associated with I{key}. Returns None if
no answer is cached for the key.
@param key: the key
@type key: (dns.name.Name, int, int) tuple whose values are the
query name, rdtype, and rdclass.
@rtype: dns.resolver.Answer object or None
"""
self.maybe_clean()
v = self.data.get(key)
if v is None or v.expiration <= time.time():
return None
return v
def put(self, key, value):
"""Associate key and value in the cache.
@param key: the key
@type key: (dns.name.Name, int, int) tuple whose values are the
query name, rdtype, and rdclass.
@param value: The answer being cached
@type value: dns.resolver.Answer object
"""
self.maybe_clean()
self.data[key] = value
def flush(self, key=None):
"""Flush the cache.
If I{key} is specified, only that item is flushed. Otherwise
the entire cache is flushed.
@param key: the key to flush
@type key: (dns.name.Name, int, int) tuple or None
"""
if not key is None:
if self.data.has_key(key):
del self.data[key]
else:
self.data = {}
self.next_cleaning = time.time() + self.cleaning_interval
class Resolver(object):
"""DNS stub resolver
@ivar domain: The domain of this host
@type domain: dns.name.Name object
@ivar nameservers: A list of nameservers to query. Each nameserver is
a string which contains the IP address of a nameserver.
@type nameservers: list of strings
@ivar search: The search list. If the query name is a relative name,
the resolver will construct an absolute query name by appending the search
names one by one to the query name.
@type search: list of dns.name.Name objects
@ivar port: The port to which to send queries. The default is 53.
@type port: int
@ivar timeout: The number of seconds to wait for a response from a
server, before timing out.
@type timeout: float
@ivar lifetime: The total number of seconds to spend trying to get an
answer to the question. If the lifetime expires, a Timeout exception
will occur.
@type lifetime: float
@ivar keyring: The TSIG keyring to use. The default is None.
@type keyring: dict
@ivar keyname: The TSIG keyname to use. The default is None.
@type keyname: dns.name.Name object
@ivar keyalgorithm: The TSIG key algorithm to use. The default is
dns.tsig.default_algorithm.
@type keyalgorithm: string
@ivar edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@ivar ednsflags: The EDNS flags
@type ednsflags: int
@ivar payload: The EDNS payload size. The default is 0.
@type payload: int
@ivar cache: The cache to use. The default is None.
@type cache: dns.resolver.Cache object
"""
def __init__(self, filename='/etc/resolv.conf', configure=True):
"""Initialize a resolver instance.
@param filename: The filename of a configuration file in
standard /etc/resolv.conf format. This parameter is meaningful
only when I{configure} is true and the platform is POSIX.
@type filename: string or file object
@param configure: If True (the default), the resolver instance
is configured in the normal fashion for the operating system
the resolver is running on. (I.e. a /etc/resolv.conf file on
POSIX systems and from the registry on Windows systems.)
@type configure: bool"""
self.reset()
if configure:
if sys.platform == 'win32':
self.read_registry()
elif filename:
self.read_resolv_conf(filename)
def reset(self):
"""Reset all resolver configuration to the defaults."""
self.domain = \
dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
if len(self.domain) == 0:
self.domain = dns.name.root
self.nameservers = []
self.search = []
self.port = 53
self.timeout = 2.0
self.lifetime = 30.0
self.keyring = None
self.keyname = None
self.keyalgorithm = dns.tsig.default_algorithm
self.edns = -1
self.ednsflags = 0
self.payload = 0
self.cache = None
def read_resolv_conf(self, f):
"""Process f as a file in the /etc/resolv.conf format. If f is
a string, it is used as the name of the file to open; otherwise it
is treated as the file itself."""
if isinstance(f, str) or isinstance(f, unicode):
try:
f = open(f, 'r')
except IOError:
# /etc/resolv.conf doesn't exist, can't be read, etc.
# We'll just use the default resolver configuration.
self.nameservers = ['127.0.0.1']
return
want_close = True
else:
want_close = False
try:
for l in f:
if len(l) == 0 or l[0] == '#' or l[0] == ';':
continue
tokens = l.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
self.nameservers.append(tokens[1])
elif tokens[0] == 'domain':
self.domain = dns.name.from_text(tokens[1])
elif tokens[0] == 'search':
for suffix in tokens[1:]:
self.search.append(dns.name.from_text(suffix))
finally:
if want_close:
f.close()
if len(self.nameservers) == 0:
self.nameservers.append('127.0.0.1')
def _determine_split_char(self, entry):
#
# The windows registry irritatingly changes the list element
# delimiter in between ' ' and ',' (and vice-versa) in various
# versions of windows.
#
if entry.find(' ') >= 0:
split_char = ' '
elif entry.find(',') >= 0:
split_char = ','
else:
# probably a singleton; treat as a space-separated list.
split_char = ' '
return split_char
def _config_win32_nameservers(self, nameservers):
"""Configure a NameServer registry entry."""
# we call str() on nameservers to convert it from unicode to ascii
nameservers = str(nameservers)
split_char = self._determine_split_char(nameservers)
ns_list = nameservers.split(split_char)
for ns in ns_list:
if not ns in self.nameservers:
self.nameservers.append(ns)
def _config_win32_domain(self, domain):
"""Configure a Domain registry entry."""
# we call str() on domain to convert it from unicode to ascii
self.domain = dns.name.from_text(str(domain))
def _config_win32_search(self, search):
"""Configure a Search registry entry."""
# we call str() on search to convert it from unicode to ascii
search = str(search)
split_char = self._determine_split_char(search)
search_list = search.split(split_char)
for s in search_list:
if not s in self.search:
self.search.append(dns.name.from_text(s))
def _config_win32_fromkey(self, key):
"""Extract DNS info from a registry key."""
try:
servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
except WindowsError:
servers = None
if servers:
self._config_win32_nameservers(servers)
try:
dom, rtype = _winreg.QueryValueEx(key, 'Domain')
if dom:
self._config_win32_domain(dom)
except WindowsError:
pass
else:
try:
servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
except WindowsError:
servers = None
if servers:
self._config_win32_nameservers(servers)
try:
dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
if dom:
self._config_win32_domain(dom)
except WindowsError:
pass
try:
search, rtype = _winreg.QueryValueEx(key, 'SearchList')
except WindowsError:
search = None
if search:
self._config_win32_search(search)
def read_registry(self):
"""Extract resolver configuration from the Windows registry."""
lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
want_scan = False
try:
try:
# XP, 2000
tcp_params = _winreg.OpenKey(lm,
r'SYSTEM\CurrentControlSet'
r'\Services\Tcpip\Parameters')
want_scan = True
except EnvironmentError:
# ME
tcp_params = _winreg.OpenKey(lm,
r'SYSTEM\CurrentControlSet'
r'\Services\VxD\MSTCP')
try:
self._config_win32_fromkey(tcp_params)
finally:
tcp_params.Close()
if want_scan:
interfaces = _winreg.OpenKey(lm,
r'SYSTEM\CurrentControlSet'
r'\Services\Tcpip\Parameters'
r'\Interfaces')
try:
i = 0
while True:
try:
guid = _winreg.EnumKey(interfaces, i)
i += 1
key = _winreg.OpenKey(interfaces, guid)
if not self._win32_is_nic_enabled(lm, guid, key):
continue
try:
self._config_win32_fromkey(key)
finally:
key.Close()
except EnvironmentError:
break
finally:
interfaces.Close()
finally:
lm.Close()
def _win32_is_nic_enabled(self, lm, guid, interface_key):
# Look in the Windows Registry to determine whether the network
# interface corresponding to the given guid is enabled.
#
# (Code contributed by Paul Marks, thanks!)
#
try:
# This hard-coded location seems to be consistent, at least
# from Windows 2000 through Vista.
connection_key = _winreg.OpenKey(
lm,
r'SYSTEM\CurrentControlSet\Control\Network'
r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
r'\%s\Connection' % guid)
try:
# The PnpInstanceID points to a key inside Enum
(pnp_id, ttype) = _winreg.QueryValueEx(
connection_key, 'PnpInstanceID')
if ttype != _winreg.REG_SZ:
raise ValueError
device_key = _winreg.OpenKey(
lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)
try:
# Get ConfigFlags for this device
(flags, ttype) = _winreg.QueryValueEx(
device_key, 'ConfigFlags')
if ttype != _winreg.REG_DWORD:
raise ValueError
# Based on experimentation, bit 0x1 indicates that the
# device is disabled.
return not (flags & 0x1)
finally:
device_key.Close()
finally:
connection_key.Close()
except (EnvironmentError, ValueError):
# Pre-vista, enabled interfaces seem to have a non-empty
# NTEContextList; this was how dnspython detected enabled
# nics before the code above was contributed. We've retained
# the old method since we don't know if the code above works
# on Windows 95/98/ME.
try:
(nte, ttype) = _winreg.QueryValueEx(interface_key,
'NTEContextList')
return nte is not None
except WindowsError:
return False
def _compute_timeout(self, start):
now = time.time()
if now < start:
if start - now > 1:
# Time going backwards is bad. Just give up.
raise Timeout
else:
# Time went backwards, but only a little. This can
# happen, e.g. under vmware with older linux kernels.
# Pretend it didn't happen.
now = start
duration = now - start
if duration >= self.lifetime:
raise Timeout
return min(self.lifetime - duration, self.timeout)
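    # Worked example (hypothetical numbers, assuming e.g. lifetime=30.0 and
    # timeout=2.0): if the clock steps back by less than a second, the
    # regression is forgiven rather than treated as a failure:
    #     start = 100.0; now = 99.5   # time.time() went backwards by 0.5s
    #     now = start                 # small regression is ignored
    #     duration = 0.0
    #     return min(30.0 - 0.0, 2.0) # -> 2.0, the per-try timeout
    # A jump of more than one second backwards raises Timeout immediately.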
def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None):
"""Query nameservers to find the answer to the question.
The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
of the appropriate type, or strings that can be converted into objects
        of the appropriate type. E.g. for I{rdtype} the integer 2 and the
        string 'NS' both mean to query for records with DNS rdata type NS.
@param qname: the query name
@type qname: dns.name.Name object or string
@param rdtype: the query type
@type rdtype: int or string
@param rdclass: the query class
@type rdclass: int or string
@param tcp: use TCP to make the query (default is False).
@type tcp: bool
@param source: bind to this IP address (defaults to machine default IP).
@type source: IP address in dotted quad notation
@rtype: dns.resolver.Answer instance
@raises Timeout: no answers could be found in the specified lifetime
@raises NXDOMAIN: the query name does not exist
@raises NoAnswer: the response did not contain an answer
@raises NoNameservers: no non-broken nameservers are available to
answer the question."""
if isinstance(qname, (str, unicode)):
qname = dns.name.from_text(qname, None)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
if isinstance(rdclass, str):
rdclass = dns.rdataclass.from_text(rdclass)
qnames_to_try = []
if qname.is_absolute():
qnames_to_try.append(qname)
else:
if len(qname) > 1:
qnames_to_try.append(qname.concatenate(dns.name.root))
if self.search:
for suffix in self.search:
qnames_to_try.append(qname.concatenate(suffix))
else:
qnames_to_try.append(qname.concatenate(self.domain))
all_nxdomain = True
start = time.time()
for qname in qnames_to_try:
if self.cache:
answer = self.cache.get((qname, rdtype, rdclass))
if answer:
return answer
request = dns.message.make_query(qname, rdtype, rdclass)
            if self.keyname is not None:
request.use_tsig(self.keyring, self.keyname, self.keyalgorithm)
request.use_edns(self.edns, self.ednsflags, self.payload)
response = None
#
# make a copy of the servers list so we can alter it later.
#
nameservers = self.nameservers[:]
backoff = 0.10
while response is None:
if len(nameservers) == 0:
raise NoNameservers
for nameserver in nameservers[:]:
timeout = self._compute_timeout(start)
try:
if tcp:
response = dns.query.tcp(request, nameserver,
timeout, self.port,
source=source)
else:
response = dns.query.udp(request, nameserver,
timeout, self.port,
source=source)
except (socket.error, dns.exception.Timeout):
#
# Communication failure or timeout. Go to the
# next server
#
response = None
continue
except dns.query.UnexpectedSource:
#
# Who knows? Keep going.
#
response = None
continue
except dns.exception.FormError:
#
# We don't understand what this server is
# saying. Take it out of the mix and
# continue.
#
nameservers.remove(nameserver)
response = None
continue
rcode = response.rcode()
if rcode == dns.rcode.NOERROR or \
rcode == dns.rcode.NXDOMAIN:
break
#
# We got a response, but we're not happy with the
# rcode in it. Remove the server from the mix if
# the rcode isn't SERVFAIL.
#
if rcode != dns.rcode.SERVFAIL:
nameservers.remove(nameserver)
response = None
                if response is not None:
break
#
# All nameservers failed!
#
if len(nameservers) > 0:
#
# But we still have servers to try. Sleep a bit
# so we don't pound them!
#
timeout = self._compute_timeout(start)
sleep_time = min(timeout, backoff)
backoff *= 2
time.sleep(sleep_time)
if response.rcode() == dns.rcode.NXDOMAIN:
continue
all_nxdomain = False
break
if all_nxdomain:
raise NXDOMAIN
answer = Answer(qname, rdtype, rdclass, response)
if self.cache:
self.cache.put((qname, rdtype, rdclass), answer)
return answer
def use_tsig(self, keyring, keyname=None,
algorithm=dns.tsig.default_algorithm):
"""Add a TSIG signature to the query.
@param keyring: The TSIG keyring to use; defaults to None.
@type keyring: dict
@param keyname: The name of the TSIG key to use; defaults to None.
The key must be defined in the keyring. If a keyring is specified
but a keyname is not, then the key used will be the first key in the
keyring. Note that the order of keys in a dictionary is not defined,
so applications should supply a keyname when a keyring is used, unless
they know the keyring contains only one key.
@param algorithm: The TSIG key algorithm to use. The default
is dns.tsig.default_algorithm.
@type algorithm: string"""
self.keyring = keyring
if keyname is None:
self.keyname = self.keyring.keys()[0]
else:
self.keyname = keyname
self.keyalgorithm = algorithm
def use_edns(self, edns, ednsflags, payload):
"""Configure Edns.
@param edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@param ednsflags: The EDNS flags
@type ednsflags: int
@param payload: The EDNS payload size. The default is 0.
@type payload: int"""
if edns is None:
edns = -1
self.edns = edns
self.ednsflags = ednsflags
self.payload = payload
default_resolver = None
def get_default_resolver():
"""Get the default resolver, initializing it if necessary."""
global default_resolver
if default_resolver is None:
default_resolver = Resolver()
return default_resolver
def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None):
"""Query nameservers to find the answer to the question.
This is a convenience function that uses the default resolver
object to make the query.
@see: L{dns.resolver.Resolver.query} for more information on the
parameters."""
return get_default_resolver().query(qname, rdtype, rdclass, tcp, source)
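# Usage sketch (illustrative only; 'www.example.com' is a hypothetical name):
#     answers = query('www.example.com', 'A')
#     for rdata in answers:
#         print rdata.address
# The Answer object iterates over the rdata of the matching rrset, so the
# loop above prints each A record's address.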
def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
"""Find the name of the zone which contains the specified name.
@param name: the query name
@type name: absolute dns.name.Name object or string
@param rdclass: The query class
@type rdclass: int
@param tcp: use TCP to make the query (default is False).
@type tcp: bool
@param resolver: the resolver to use
@type resolver: dns.resolver.Resolver object or None
@rtype: dns.name.Name"""
if isinstance(name, (str, unicode)):
name = dns.name.from_text(name, dns.name.root)
if resolver is None:
resolver = get_default_resolver()
if not name.is_absolute():
raise NotAbsolute(name)
    while True:
try:
answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
return name
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
try:
name = name.parent()
except dns.name.NoParent:
raise NoRootSOA
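# Usage sketch (illustrative only; the name is hypothetical): zone_for_name()
# walks up the name hierarchy until a SOA answer is found, so
#     zone = zone_for_name('www.example.com')
# would typically return the dns.name.Name for 'example.com.'.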
| apache-2.0 |
harej/reports_bot | reportsbot/config.py | 2 | 2143 | # -*- coding: utf-8 -*-
import errno
from os import path
import yaml
from .exceptions import ConfigError
__all__ = ["Config"]
class Config:
"""Stores general-purpose bot configuration."""
def __init__(self, base_dir):
self._base_dir = base_dir
self._data = {}
self._load()
def _load(self):
"""Load or reload the bot's main configuration file (config.yml)."""
filename = path.join(self._base_dir, "config.yml")
try:
with open(filename) as fp:
self._data = yaml.full_load(fp)
        except (OSError, yaml.error.YAMLError) as exc:
            # yaml.error.YAMLError has no errno attribute, so use getattr to
            # avoid an AttributeError when the file exists but fails to parse.
            if getattr(exc, "errno", None) == errno.ENOENT:
                return  # Ignore missing file; use defaults
err = "Couldn't read config file ({}):\n{}"
raise ConfigError(err.format(filename, exc)) from None
def _get_sql_info(self, which):
"""Get some SQL connection info."""
sql = self._data.get("sql", {})
info = sql.get("all", {}).copy()
info.update(sql.get(which, {}))
return info
@property
def dir(self):
"""Return the bot's config directory."""
return self._base_dir
@property
def username(self):
"""Return the bot's username."""
return self._data.get("username")
@property
def default_project(self):
"""Return the default site project, like 'wikipedia'."""
return self._data.get("defaults", {}).get("project", "wikipedia")
@property
def default_lang(self):
"""Return the default site language, like 'en'."""
return self._data.get("defaults", {}).get("lang", "en")
def get_wiki_sql(self, site):
"""Return SQL connection info for the wiki DB for the given site."""
info = self._get_sql_info("wiki")
for key, val in info.items(): # Convert db="{site}_p" to "enwiki_p"
if isinstance(val, str):
info[key] = val.format(site=site)
return info
def get_local_sql(self):
"""Return SQL connection info for the local Reports bot/WPX DB."""
return self._get_sql_info("local")
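# Usage sketch (illustrative only; the config values are hypothetical): with a
# config.yml containing
#     sql:
#       all:  {host: "db.local", user: "bot"}
#       wiki: {db: "{site}_p"}
# a call such as Config("/path/to/confdir").get_wiki_sql("enwiki") returns
# {'host': 'db.local', 'user': 'bot', 'db': 'enwiki_p'}, because string values
# in the merged connection info are formatted with the site name.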
| mit |
hynnet/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/scons-2.1.0/SCons/Defaults.py | 21 | 17649 | """SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import division
__revision__ = "src/engine/SCons/Defaults.py 5357 2011/09/09 21:31:03 bdeegan"
import os
import errno
import shutil
import stat
import time
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
"""
Returns the already-created default construction environment.
"""
global _default_env
return _default_env
def DefaultEnvironment(*args, **kw):
"""
Initial public entry point for creating the default construction
Environment.
After creating the environment, we overwrite our name
(DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
which more efficiently returns the initialized default construction
environment without checking for its existence.
(This function still exists with its _default_check because someone
else (*cough* Script/__init__.py *cough*) may keep a reference
to this function. So we can't use the fully functional idiom of
    having the name originally be something that *only* creates the
construction environment and then overwrites the name.)
"""
global _default_env
if not _default_env:
import SCons.Util
_default_env = SCons.Environment.Environment(*args, **kw)
if SCons.Util.md5:
_default_env.Decider('MD5')
else:
_default_env.Decider('timestamp-match')
global DefaultEnvironment
DefaultEnvironment = _fetch_DefaultEnvironment
_default_env._CacheDir_path = None
return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = None
return (target, source)
def SharedObjectEmitter(target, source, env):
for tgt in target:
tgt.attributes.shared = 1
return (target, source)
def SharedFlagChecker(source, target, env):
same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
if same == '0' or same == '' or same == 'False':
for src in source:
try:
shared = src.attributes.shared
except AttributeError:
shared = None
if not shared:
raise SCons.Errors.UserError("Source file: %s is static and is not compatible with shared target: %s" % (src, target[0]))
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)
# Some people were using these variable name before we made
# SourceFileScanner part of the public interface. Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner
# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go. Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()
# Actions for common languages.
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")
# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
# If dest is a list, we need to manually call str() on each element
if SCons.Util.is_List(dest):
elem_strs = []
for element in dest:
elem_strs.append('"' + str(element) + '"')
return '[' + ', '.join(elem_strs) + ']'
else:
return '"' + str(dest) + '"'
def chmod_func(dest, mode):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for element in dest:
os.chmod(str(element), mode)
def chmod_strfunc(dest, mode):
return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode)
Chmod = ActionFactory(chmod_func, chmod_strfunc)
def copy_func(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.isfile(src):
return shutil.copy2(src, dest)
else:
return shutil.copytree(src, dest, 1)
Copy = ActionFactory(copy_func,
lambda dest, src: 'Copy("%s", "%s")' % (dest, src),
convert=str)
def delete_func(dest, must_exist=0):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for entry in dest:
entry = str(entry)
# os.path.exists returns False with broken links that exist
entry_exists = os.path.exists(entry) or os.path.islink(entry)
if not entry_exists and not must_exist:
continue
# os.path.isdir returns True when entry is a link to a dir
if os.path.isdir(entry) and not os.path.islink(entry):
shutil.rmtree(entry, 1)
continue
os.unlink(entry)
def delete_strfunc(dest, must_exist=0):
return 'Delete(%s)' % get_paths_str(dest)
Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for entry in dest:
try:
os.makedirs(str(entry))
except os.error, e:
p = str(entry)
if (e.args[0] == errno.EEXIST or
(sys.platform=='win32' and e.args[0]==183)) \
and os.path.isdir(str(entry)):
pass # not an error if already exists
else:
raise
Mkdir = ActionFactory(mkdir_func,
lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
SCons.Node.FS.invalidate_node_memos(src)
shutil.move(src, dest)
Move = ActionFactory(move_func,
lambda dest, src: 'Move("%s", "%s")' % (dest, src),
convert=str)
def touch_func(dest):
SCons.Node.FS.invalidate_node_memos(dest)
if not SCons.Util.is_List(dest):
dest = [dest]
for file in dest:
file = str(file)
mtime = int(time.time())
if os.path.exists(file):
atime = os.path.getatime(file)
else:
open(file, 'w')
atime = mtime
os.utime(file, (atime, mtime))
Touch = ActionFactory(touch_func,
lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
"""
Creates a new list from 'list' by first interpolating each element
in the list using the 'env' dictionary and then calling f on the
list, and finally calling _concat_ixes to concatenate 'prefix' and
'suffix' onto each element of the list.
"""
if not list:
return list
l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
if l is not None:
list = l
return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
"""
Creates a new list from 'list' by concatenating the 'prefix' and
'suffix' arguments onto each element of the list. A trailing space
on 'prefix' or leading space on 'suffix' will cause them to be put
into separate list elements rather than being concatenated.
"""
result = []
# ensure that prefix and suffix are strings
prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))
for x in list:
if isinstance(x, SCons.Node.FS.File):
result.append(x)
continue
x = str(x)
if x:
if prefix:
if prefix[-1] == ' ':
result.append(prefix[:-1])
elif x[:len(prefix)] != prefix:
x = prefix + x
result.append(x)
if suffix:
if suffix[0] == ' ':
result.append(suffix[1:])
elif x[-len(suffix):] != suffix:
result[-1] = result[-1]+suffix
return result
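# Illustrative behaviour (assuming 'env' is a construction environment whose
# subst() leaves the prefix and suffix unchanged):
#     _concat_ixes('-I', ['/usr/include'], '', env)   -> ['-I/usr/include']
#     _concat_ixes('-I ', ['/usr/include'], '', env)  -> ['-I', '/usr/include']
# i.e. a trailing space on the prefix keeps it as a separate list element.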
def _stripixes(prefix, itms, suffix, stripprefixes, stripsuffixes, env, c=None):
"""
This is a wrapper around _concat()/_concat_ixes() that checks for
the existence of prefixes or suffixes on list items and strips them
where it finds them. This is used by tools (like the GNU linker)
that need to turn something like 'libfoo.a' into '-lfoo'.
"""
if not itms:
return itms
if not callable(c):
env_c = env['_concat']
if env_c != _concat and callable(env_c):
# There's a custom _concat() method in the construction
# environment, and we've allowed people to set that in
# the past (see test/custom-concat.py), so preserve the
# backwards compatibility.
c = env_c
else:
c = _concat_ixes
stripprefixes = list(map(env.subst, SCons.Util.flatten(stripprefixes)))
stripsuffixes = list(map(env.subst, SCons.Util.flatten(stripsuffixes)))
stripped = []
for l in SCons.PathList.PathList(itms).subst_path(env, None, None):
if isinstance(l, SCons.Node.FS.File):
stripped.append(l)
continue
if not SCons.Util.is_String(l):
l = str(l)
for stripprefix in stripprefixes:
lsp = len(stripprefix)
if l[:lsp] == stripprefix:
l = l[lsp:]
# Do not strip more than one prefix
break
for stripsuffix in stripsuffixes:
lss = len(stripsuffix)
if l[-lss:] == stripsuffix:
l = l[:-lss]
# Do not strip more than one suffix
break
stripped.append(l)
return c(prefix, stripped, suffix, env)
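# Illustrative behaviour (hypothetical values, assuming a default construction
# environment): with stripprefixes=['lib'] and stripsuffixes=['.a'],
#     _stripixes('-l', ['libfoo.a'], '', ['lib'], ['.a'], env)
# first reduces 'libfoo.a' to 'foo' and then concatenates the prefix,
# yielding ['-lfoo'], the GNU-linker-style transformation described above.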
def processDefines(defs):
"""process defines, resolving strings, lists, dictionaries, into a list of
strings
"""
if SCons.Util.is_List(defs):
l = []
for d in defs:
if d is None:
continue
elif SCons.Util.is_List(d) or isinstance(d, tuple):
if len(d) >= 2:
l.append(str(d[0]) + '=' + str(d[1]))
else:
l.append(str(d[0]))
elif SCons.Util.is_Dict(d):
for macro,value in d.iteritems():
if value is not None:
l.append(str(macro) + '=' + str(value))
else:
l.append(str(macro))
elif SCons.Util.is_String(d):
l.append(str(d))
else:
raise SCons.Errors.UserError("DEFINE %s is not a list, dict, string or None."%repr(d))
elif SCons.Util.is_Dict(defs):
# The items in a dictionary are stored in random order, but
# if the order of the command-line options changes from
# invocation to invocation, then the signature of the command
# line will change and we'll get random unnecessary rebuilds.
# Consequently, we have to sort the keys to ensure a
# consistent order...
l = []
for k,v in sorted(defs.items()):
if v is None:
l.append(str(k))
else:
l.append(str(k) + '=' + str(v))
else:
l = [str(defs)]
return l
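# Illustrative behaviour: processDefines() flattens mixed CPPDEFINES-style
# input into 'NAME' / 'NAME=value' strings, for example
#     processDefines(['FOO', ('BAR', 1), {'BAZ': None}]) -> ['FOO', 'BAR=1', 'BAZ']
#     processDefines({'B': None, 'A': 2})                -> ['A=2', 'B']
# (dictionary input is sorted by key to keep the command line stable).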
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
"""A wrapper around _concat_ixes that turns a list or string
into a list of C preprocessor command-line definitions.
"""
return c(prefix, env.subst_path(processDefines(defs)), suffix, env)
class NullCmdGenerator(object):
"""This is a callable class that can be used in place of other
command generators if you don't want them to do anything.
The __call__ method for this class simply returns the thing
you instantiated it with.
Example usage:
env["DO_NOTHING"] = NullCmdGenerator
env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
"""
def __init__(self, cmd):
self.cmd = cmd
def __call__(self, target, source, env, for_signature=None):
return self.cmd
class Variable_Method_Caller(object):
"""A class for finding a construction variable on the stack and
calling one of its methods.
We use this to support "construction variables" in our string
eval()s that actually stand in for methods--specifically, use
of "RDirs" in call to _concat that should actually execute the
"TARGET.RDirs" method. (We used to support this by creating a little
"build dictionary" that mapped RDirs to the method, but this got in
the way of Memoizing construction environments, because we had to
create new environment objects to hold the variables.)
"""
def __init__(self, variable, method):
self.variable = variable
self.method = method
def __call__(self, *args, **kw):
try: 1//0
except ZeroDivisionError:
# Don't start iterating with the current stack-frame to
# prevent creating reference cycles (f_back is safe).
frame = sys.exc_info()[2].tb_frame.f_back
variable = self.variable
while frame:
if variable in frame.f_locals:
v = frame.f_locals[variable]
if v:
method = getattr(v, self.method)
return method(*args, **kw)
frame = frame.f_back
return None
ConstructionEnvironment = {
'BUILDERS' : {},
'SCANNERS' : [],
'CONFIGUREDIR' : '#/.sconf_temp',
'CONFIGURELOG' : '#/config.log',
'CPPSUFFIXES' : SCons.Tool.CSuffixes,
'DSUFFIXES' : SCons.Tool.DSuffixes,
'ENV' : {},
'IDLSUFFIXES' : SCons.Tool.IDLSuffixes,
# 'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes, # moved to the TeX tools generate functions
'_concat' : _concat,
'_defines' : _defines,
'_stripixes' : _stripixes,
'_LIBFLAGS' : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
'_LIBDIRFLAGS' : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPINCFLAGS' : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
'_CPPDEFFLAGS' : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
'TEMPFILE' : NullCmdGenerator,
'Dir' : Variable_Method_Caller('TARGET', 'Dir'),
'Dirs' : Variable_Method_Caller('TARGET', 'Dirs'),
'File' : Variable_Method_Caller('TARGET', 'File'),
'RDirs' : Variable_Method_Caller('TARGET', 'RDirs'),
}
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
pylbert/upm | examples/python/apa102.py | 6 | 1909 | #!/usr/bin/env python
# Author: Yannick Adam <[email protected]>
# Copyright (c) 2016 Yannick Adam
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_apa102 as mylib
def main():
# Instantiate a strip of 30 LEDs on SPI bus 0
ledStrip = mylib.APA102(30, 0, False)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# Register exit handlers
signal.signal(signal.SIGINT, SIGINTHandler)
print("Setting all LEDs to Green")
ledStrip.setAllLeds(31, 0, 255, 0)
print("Setting LEDs between 10 and 20 to Red")
ledStrip.setLeds(10, 20, 31, 255, 0, 0)
print("Setting LED 15 to Blue")
ledStrip.setLed(15, 31, 0, 0, 255)
if __name__ == '__main__':
main()
| mit |
affo/nova | nova/virt/hyperv/livemigrationutils.py | 2 | 11279 | # Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
if sys.platform == 'win32':
import wmi
from oslo_log import log as logging
from nova import exception
from nova.i18n import _
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
class LiveMigrationUtils(object):
def __init__(self):
self._vmutils = vmutilsv2.VMUtilsV2()
self._volutils = volumeutilsv2.VolumeUtilsV2()
def _get_conn_v2(self, host='localhost'):
try:
return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
except wmi.x_wmi as ex:
LOG.exception(ex)
if ex.com_error.hresult == -2147217394:
msg = (_('Live migration is not supported on target host "%s"')
% host)
elif ex.com_error.hresult == -2147023174:
msg = (_('Target live migration host "%s" is unreachable')
% host)
else:
msg = _('Live migration failed: %s') % ex.message
raise vmutils.HyperVException(msg)
def check_live_migration_config(self):
conn_v2 = self._get_conn_v2()
migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
vsmssds = migration_svc.associators(
wmi_association_class='Msvm_ElementSettingData',
wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')
vsmssd = vsmssds[0]
if not vsmssd.EnableVirtualSystemMigration:
raise vmutils.HyperVException(
_('Live migration is not enabled on this host'))
if not migration_svc.MigrationServiceListenerIPAddressList:
raise vmutils.HyperVException(
_('Live migration networks are not configured on this host'))
def _get_vm(self, conn_v2, vm_name):
vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if not n:
raise exception.NotFound(_('VM not found: %s') % vm_name)
elif n > 1:
raise vmutils.HyperVException(_('Duplicate VM name found: %s')
% vm_name)
return vms[0]
def _destroy_planned_vm(self, conn_v2_remote, planned_vm):
LOG.debug("Destroying existing remote planned VM: %s",
planned_vm.ElementName)
vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
self._vmutils.check_ret_val(ret_val, job_path)
def _check_existing_planned_vm(self, conn_v2_remote, vm):
# Make sure that there's not yet a remote planned VM on the target
# host for this VM
planned_vms = conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)
if planned_vms:
self._destroy_planned_vm(conn_v2_remote, planned_vms[0])
def _create_remote_planned_vm(self, conn_v2_local, conn_v2_remote,
vm, rmt_ip_addr_list, dest_host):
# Staged
vsmsd = conn_v2_local.query("select * from "
"Msvm_VirtualSystemMigrationSettingData "
"where MigrationType = 32770")[0]
vsmsd.DestinationIPAddressList = rmt_ip_addr_list
migration_setting_data = vsmsd.GetText_(1)
LOG.debug("Creating remote planned VM for VM: %s",
vm.ElementName)
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
ComputerSystem=vm.path_(),
DestinationHost=dest_host,
MigrationSettingData=migration_setting_data)
self._vmutils.check_ret_val(ret_val, job_path)
return conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)[0]
def _get_physical_disk_paths(self, vm_name):
ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
if ide_ctrl_path:
ide_paths = self._vmutils.get_controller_volume_paths(
ide_ctrl_path)
else:
ide_paths = {}
scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name)
scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path)
return dict(ide_paths.items() + scsi_paths.items())
def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host):
volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host)
disk_paths_remote = {}
for (rasd_rel_path, disk_path) in disk_paths.items():
target = self._volutils.get_target_from_disk_path(disk_path)
if target:
(target_iqn, target_lun) = target
dev_num = volutils_remote.get_device_number_for_target(
target_iqn, target_lun)
disk_path_remote = (
vmutils_remote.get_mounted_disk_by_drive_number(dev_num))
disk_paths_remote[rasd_rel_path] = disk_path_remote
else:
LOG.debug("Could not retrieve iSCSI target "
"from disk path: %s", disk_path)
return disk_paths_remote
def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote,
planned_vm, vm_name,
disk_paths_remote):
vm_settings = planned_vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')[0]
updated_resource_setting_data = []
sasds = vm_settings.associators(
wmi_association_class='Msvm_VirtualSystemSettingDataComponent')
for sasd in sasds:
if (sasd.ResourceType == 17 and sasd.ResourceSubType ==
"Microsoft:Hyper-V:Physical Disk Drive" and
sasd.HostResource):
# Replace the local disk target with the correct remote one
old_disk_path = sasd.HostResource[0]
new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)
LOG.debug("Replacing host resource "
"%(old_disk_path)s with "
"%(new_disk_path)s on planned VM %(vm_name)s",
{'old_disk_path': old_disk_path,
'new_disk_path': new_disk_path,
'vm_name': vm_name})
sasd.HostResource = [new_disk_path]
updated_resource_setting_data.append(sasd.GetText_(1))
LOG.debug("Updating remote planned VM disk paths for VM: %s",
vm_name)
vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
(res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings(
ResourceSettings=updated_resource_setting_data)
vmutils_remote.check_ret_val(ret_val, job_path)
def _get_vhd_setting_data(self, vm):
vm_settings = vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')[0]
new_resource_setting_data = []
sasds = vm_settings.associators(
wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
wmi_result_class='Msvm_StorageAllocationSettingData')
for sasd in sasds:
if (sasd.ResourceType == 31 and sasd.ResourceSubType ==
"Microsoft:Hyper-V:Virtual Hard Disk"):
new_resource_setting_data.append(sasd.GetText_(1))
return new_resource_setting_data
def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
new_resource_setting_data, dest_host):
# VirtualSystemAndStorage
vsmsd = conn_v2_local.query("select * from "
"Msvm_VirtualSystemMigrationSettingData "
"where MigrationType = 32771")[0]
vsmsd.DestinationIPAddressList = rmt_ip_addr_list
if planned_vm:
vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name
migration_setting_data = vsmsd.GetText_(1)
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
LOG.debug("Starting live migration for VM: %s", vm.ElementName)
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
ComputerSystem=vm.path_(),
DestinationHost=dest_host,
MigrationSettingData=migration_setting_data,
NewResourceSettingData=new_resource_setting_data)
self._vmutils.check_ret_val(ret_val, job_path)
def _get_remote_ip_address_list(self, conn_v2_remote, dest_host):
LOG.debug("Getting live migration networks for remote host: %s",
dest_host)
migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
return migr_svc_rmt.MigrationServiceListenerIPAddressList
def live_migrate_vm(self, vm_name, dest_host):
self.check_live_migration_config()
conn_v2_local = self._get_conn_v2()
conn_v2_remote = self._get_conn_v2(dest_host)
vm = self._get_vm(conn_v2_local, vm_name)
self._check_existing_planned_vm(conn_v2_remote, vm)
rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote,
dest_host)
planned_vm = None
disk_paths = self._get_physical_disk_paths(vm_name)
if disk_paths:
vmutils_remote = vmutilsv2.VMUtilsV2(dest_host)
disk_paths_remote = self._get_remote_disk_data(vmutils_remote,
disk_paths,
dest_host)
planned_vm = self._create_remote_planned_vm(conn_v2_local,
conn_v2_remote,
vm, rmt_ip_addr_list,
dest_host)
self._update_planned_vm_disk_resources(vmutils_remote,
conn_v2_remote, planned_vm,
vm_name, disk_paths_remote)
new_resource_setting_data = self._get_vhd_setting_data(vm)
self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
new_resource_setting_data, dest_host)
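    # Usage sketch (illustrative only; the instance and host names are
    # hypothetical): on a Hyper-V compute node the class is driven as
    #     utils = LiveMigrationUtils()
    #     utils.check_live_migration_config()
    #     utils.live_migrate_vm('instance-00000001', 'dest-hyperv-host')
    # which validates the local migration service and then migrates the VM,
    # updating any passed-through physical disk paths for the target host.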
| apache-2.0 |
UniversalMasterEgg8679/ansible | lib/ansible/plugins/shell/powershell.py | 26 | 46097 | # (c) 2014, Chris Church <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import os
import re
import shlex
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
# Primarily for testing, allow explicitly specifying PowerShell version via
# an environment variable.
_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
if _powershell_version:
_common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
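# Illustrative behaviour: with POWERSHELL_VERSION=3.0 set in the environment,
# the base command list becomes
#     ['PowerShell', '-Version', '3.0', '-NoProfile', '-NonInteractive',
#      '-ExecutionPolicy', 'Unrestricted']
# i.e. the '-Version' pair is spliced in right after the executable name.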
exec_wrapper = br'''
#Requires -Version 3.0
begin {
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
function ConvertTo-HashtableFromPsCustomObject ($myPsObject){
$output = @{};
$myPsObject | Get-Member -MemberType *Property | % {
$val = $myPsObject.($_.name);
If ($val -is [psobject]) {
$val = ConvertTo-HashtableFromPsCustomObject $val
}
$output.($_.name) = $val
}
return $output;
}
# stream JSON including become_pw, ps_module_payload, bin_module_payload, become_payload, write_payload_path, preserve directives
# exec runspace, capture output, cleanup, return module output
$json_raw = ""
}
process {
$input_as_string = [string]$input
$json_raw += $input_as_string
}
end {
If (-not $json_raw) {
Write-Error "no input given" -Category InvalidArgument
}
$payload = ConvertTo-HashtableFromPsCustomObject (ConvertFrom-Json $json_raw)
# TODO: handle binary modules
# TODO: handle persistence
$actions = $payload.actions
# pop 0th action as entrypoint
$entrypoint = $payload.($actions[0])
$payload.actions = $payload.actions[1..99]
$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
# load the current action entrypoint as a module custom object with a Run method
$entrypoint = New-Module -ScriptBlock ([scriptblock]::Create($entrypoint)) -AsCustomObject
Set-Variable -Scope global -Name complex_args -Value $payload["module_args"] | Out-Null
# dynamically create/load modules
ForEach ($mod in $payload.powershell_modules.GetEnumerator()) {
$decoded_module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($mod.Value))
New-Module -ScriptBlock ([scriptblock]::Create($decoded_module)) -Name $mod.Key | Import-Module -WarningAction SilentlyContinue | Out-Null
}
$output = $entrypoint.Run($payload)
Write-Output $output
}
''' # end exec_wrapper
leaf_exec = br'''
Function Run($payload) {
$entrypoint = $payload.module_entry
$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
$ps = [powershell]::Create()
$ps.AddStatement().AddCommand("Set-Variable").AddParameters(@{Scope="global";Name="complex_args";Value=$payload.module_args}) | Out-Null
$ps.AddCommand("Out-Null") | Out-Null
# redefine Write-Host to dump to output instead of failing- lots of scripts use it
$ps.AddStatement().AddScript("Function Write-Host(`$msg){ Write-Output `$msg }") | Out-Null
ForEach ($env_kv in $payload.environment.GetEnumerator()) {
$escaped_env_set = "`$env:{0} = '{1}'" -f $env_kv.Key,$env_kv.Value.Replace("'","''")
$ps.AddStatement().AddScript($escaped_env_set) | Out-Null
}
# dynamically create/load modules
ForEach ($mod in $payload.powershell_modules.GetEnumerator()) {
$decoded_module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($mod.Value))
$ps.AddStatement().AddCommand("New-Module").AddParameters(@{ScriptBlock=([scriptblock]::Create($decoded_module));Name=$mod.Key}) | Out-Null
$ps.AddCommand("Import-Module").AddParameters(@{WarningAction="SilentlyContinue"}) | Out-Null
$ps.AddCommand("Out-Null") | Out-Null
}
# force input encoding to preamble-free UTF8 so PS sub-processes (eg, Start-Job) don't blow up
$ps.AddStatement().AddScript("[Console]::InputEncoding = New-Object Text.UTF8Encoding `$false") | Out-Null
$ps.AddStatement().AddScript($entrypoint) | Out-Null
$output = $ps.Invoke()
$output
# PS3 doesn't properly set HadErrors in many cases, inspect the error stream as a fallback
If ($ps.HadErrors -or ($PSVersionTable.PSVersion.Major -lt 4 -and $ps.Streams.Error.Count -gt 0)) {
[System.Console]::Error.WriteLine($($ps.Streams.Error | Out-String))
$exit_code = $ps.Runspace.SessionStateProxy.GetVariable("LASTEXITCODE")
If(-not $exit_code) {
$exit_code = 1
}
# need to use this instead of Exit keyword to prevent runspace from crashing with dynamic modules
$host.SetShouldExit($exit_code)
}
}
''' # end leaf_exec
become_wrapper = br'''
Set-StrictMode -Version 2
$ErrorActionPreference = "Stop"
$helper_def = @"
using System;
using System.Diagnostics;
using System.IO;
using System.Threading;
using System.Security;
using System.Security.AccessControl;
using System.Security.Principal;
using System.Runtime.InteropServices;
namespace Ansible.Shell
{
public class ProcessUtil
{
public static void GetProcessOutput(StreamReader stdoutStream, StreamReader stderrStream, out string stdout, out string stderr)
{
var sowait = new EventWaitHandle(false, EventResetMode.ManualReset);
var sewait = new EventWaitHandle(false, EventResetMode.ManualReset);
string so = null, se = null;
ThreadPool.QueueUserWorkItem((s)=>
{
so = stdoutStream.ReadToEnd();
sowait.Set();
});
ThreadPool.QueueUserWorkItem((s) =>
{
se = stderrStream.ReadToEnd();
sewait.Set();
});
foreach(var wh in new WaitHandle[] { sowait, sewait })
wh.WaitOne();
stdout = so;
stderr = se;
}
// http://stackoverflow.com/a/30687230/139652
public static void GrantAccessToWindowStationAndDesktop(string username)
{
const int WindowStationAllAccess = 0x000f037f;
GrantAccess(username, GetProcessWindowStation(), WindowStationAllAccess);
const int DesktopRightsAllAccess = 0x000f01ff;
GrantAccess(username, GetThreadDesktop(GetCurrentThreadId()), DesktopRightsAllAccess);
}
private static void GrantAccess(string username, IntPtr handle, int accessMask)
{
SafeHandle safeHandle = new NoopSafeHandle(handle);
GenericSecurity security =
new GenericSecurity(false, ResourceType.WindowObject, safeHandle, AccessControlSections.Access);
security.AddAccessRule(
new GenericAccessRule(new NTAccount(username), accessMask, AccessControlType.Allow));
security.Persist(safeHandle, AccessControlSections.Access);
}
[DllImport("user32.dll", SetLastError = true)]
private static extern IntPtr GetProcessWindowStation();
[DllImport("user32.dll", SetLastError = true)]
private static extern IntPtr GetThreadDesktop(int dwThreadId);
[DllImport("kernel32.dll", SetLastError = true)]
private static extern int GetCurrentThreadId();
private class GenericAccessRule : AccessRule
{
public GenericAccessRule(IdentityReference identity, int accessMask, AccessControlType type) :
base(identity, accessMask, false, InheritanceFlags.None, PropagationFlags.None, type) { }
}
private class GenericSecurity : NativeObjectSecurity
{
public GenericSecurity(bool isContainer, ResourceType resType, SafeHandle objectHandle, AccessControlSections sectionsRequested)
: base(isContainer, resType, objectHandle, sectionsRequested) { }
public new void Persist(SafeHandle handle, AccessControlSections includeSections) { base.Persist(handle, includeSections); }
public new void AddAccessRule(AccessRule rule) { base.AddAccessRule(rule); }
public override Type AccessRightType { get { throw new NotImplementedException(); } }
public override AccessRule AccessRuleFactory(System.Security.Principal.IdentityReference identityReference, int accessMask, bool isInherited,
InheritanceFlags inheritanceFlags, PropagationFlags propagationFlags, AccessControlType type) { throw new NotImplementedException(); }
public override Type AccessRuleType { get { return typeof(AccessRule); } }
public override AuditRule AuditRuleFactory(System.Security.Principal.IdentityReference identityReference, int accessMask, bool isInherited,
InheritanceFlags inheritanceFlags, PropagationFlags propagationFlags, AuditFlags flags) { throw new NotImplementedException(); }
public override Type AuditRuleType { get { return typeof(AuditRule); } }
}
private class NoopSafeHandle : SafeHandle
{
public NoopSafeHandle(IntPtr handle) : base(handle, false) { }
public override bool IsInvalid { get { return false; } }
protected override bool ReleaseHandle() { return true; }
}
}
}
"@
$exec_wrapper = {
#Requires -Version 3.0
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
function ConvertTo-HashtableFromPsCustomObject ($myPsObject){
$output = @{};
$myPsObject | Get-Member -MemberType *Property | % {
$val = $myPsObject.($_.name);
If ($val -is [psobject]) {
$val = ConvertTo-HashtableFromPsCustomObject $val
}
$output.($_.name) = $val
}
return $output;
}
# stream JSON including become_pw, ps_module_payload, bin_module_payload, become_payload, write_payload_path, preserve directives
# exec runspace, capture output, cleanup, return module output
$json_raw = [System.Console]::In.ReadToEnd()
If (-not $json_raw) {
Write-Error "no input given" -Category InvalidArgument
}
$payload = ConvertTo-HashtableFromPsCustomObject (ConvertFrom-Json $json_raw)
# TODO: handle binary modules
# TODO: handle persistence
$actions = $payload.actions
# pop 0th action as entrypoint
$entrypoint = $payload.($actions[0])
$payload.actions = $payload.actions[1..99]
$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
# load the current action entrypoint as a module custom object with a Run method
$entrypoint = New-Module -ScriptBlock ([scriptblock]::Create($entrypoint)) -AsCustomObject
Set-Variable -Scope global -Name complex_args -Value $payload["module_args"] | Out-Null
# dynamically create/load modules
ForEach ($mod in $payload.powershell_modules.GetEnumerator()) {
$decoded_module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($mod.Value))
New-Module -ScriptBlock ([scriptblock]::Create($decoded_module)) -Name $mod.Key | Import-Module -WarningAction SilentlyContinue | Out-Null
}
$output = $entrypoint.Run($payload)
Write-Output $output
} # end exec_wrapper
Function Dump-Error ($excep) {
$eo = @{failed=$true}
$eo.msg = $excep.Exception.Message
$eo.exception = $excep | Out-String
$host.SetShouldExit(1)
$eo | ConvertTo-Json -Depth 10
}
Function Run($payload) {
# NB: action popping handled inside subprocess wrapper
$username = $payload.become_user
$password = $payload.become_password
Add-Type -TypeDefinition $helper_def -Debug:$false
$exec_args = $null
$exec_application = "powershell"
# NB: CreateProcessWithLogonW commandline maxes out at 1024 chars, must bootstrap via filesystem
$temp = [System.IO.Path]::Combine([System.IO.Path]::GetTempPath(), [System.IO.Path]::GetRandomFileName() + ".ps1")
$exec_wrapper.ToString() | Set-Content -Path $temp
# TODO: grant target user permissions on tempfile/tempdir
Try {
# Base64 encode the command so we don't have to worry about the various levels of escaping
# $encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($exec_wrapper.ToString()))
# force the input encoding to preamble-free UTF8 before we create the new process
[System.Console]::InputEncoding = $(New-Object System.Text.UTF8Encoding @($false))
$exec_args = @("-noninteractive", $temp)
$proc = New-Object System.Diagnostics.Process
$psi = $proc.StartInfo
$psi.FileName = $exec_application
$psi.Arguments = $exec_args
$psi.RedirectStandardInput = $true
$psi.RedirectStandardOutput = $true
$psi.RedirectStandardError = $true
$psi.UseShellExecute = $false
If($username.Contains("\")) {
$sp = $username.Split(@([char]"\"), 2)
$domain = $sp[0]
$username = $sp[1]
}
ElseIf ($username.Contains("@")) {
$domain = $null
}
Else {
$domain = "."
}
$psi.Domain = $domain
$psi.Username = $username
$psi.Password = $($password | ConvertTo-SecureString -AsPlainText -Force)
Try {
[Ansible.Shell.ProcessUtil]::GrantAccessToWindowStationAndDesktop($username)
}
Catch {
$excep = $_
throw "Error granting windowstation/desktop access to '$username' (is the username valid?): $excep"
}
Try {
$proc.Start() | Out-Null # will always return $true for non shell-exec cases
}
Catch {
$excep = $_
if ($excep.Exception.InnerException -and `
$excep.Exception.InnerException -is [System.ComponentModel.Win32Exception] -and `
$excep.Exception.InnerException.NativeErrorCode -eq 5) {
throw "Become method 'runas' become is not currently supported with the NTLM or Kerberos auth types"
}
throw "Error launching under identity '$username': $excep"
}
$payload_string = $payload | ConvertTo-Json -Depth 99 -Compress
# push the execution payload over stdin
$proc.StandardInput.WriteLine($payload_string)
$proc.StandardInput.Close()
$stdout = $stderr = [string] $null
[Ansible.Shell.ProcessUtil]::GetProcessOutput($proc.StandardOutput, $proc.StandardError, [ref] $stdout, [ref] $stderr) | Out-Null
# TODO: decode CLIXML stderr output (and other streams?)
$proc.WaitForExit() | Out-Null
$rc = $proc.ExitCode
If ($rc -eq 0) {
$stdout
$stderr
}
Else {
Throw "failed, rc was $rc, stderr was $stderr, stdout was $stdout"
}
}
Catch {
$excep = $_
Dump-Error $excep
}
Finally {
Remove-Item $temp -ErrorAction SilentlyContinue
}
}
''' # end become_wrapper
async_wrapper = br'''
Set-StrictMode -Version 2
$ErrorActionPreference = "Stop"
# build exec_wrapper encoded command
# start powershell with breakaway running exec_wrapper encodedcommand
# stream payload to powershell with normal exec, but normal exec writes results to resultfile instead of stdout/stderr
# return asyncresult to controller
$exec_wrapper = {
#Requires -Version 3.0
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
function ConvertTo-HashtableFromPsCustomObject ($myPsObject){
$output = @{};
$myPsObject | Get-Member -MemberType *Property | % {
$val = $myPsObject.($_.name);
If ($val -is [psobject]) {
$val = ConvertTo-HashtableFromPsCustomObject $val
}
$output.($_.name) = $val
}
return $output;
}
# stream JSON including become_pw, ps_module_payload, bin_module_payload, become_payload, write_payload_path, preserve directives
# exec runspace, capture output, cleanup, return module output
$json_raw = [System.Console]::In.ReadToEnd()
If (-not $json_raw) {
Write-Error "no input given" -Category InvalidArgument
}
$payload = ConvertTo-HashtableFromPsCustomObject (ConvertFrom-Json $json_raw)
# TODO: handle binary modules
# TODO: handle persistence
$actions = $payload.actions
# pop 0th action as entrypoint
$entrypoint = $payload.($actions[0])
$payload.actions = $payload.actions[1..99]
$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
# load the current action entrypoint as a module custom object with a Run method
$entrypoint = New-Module -ScriptBlock ([scriptblock]::Create($entrypoint)) -AsCustomObject
Set-Variable -Scope global -Name complex_args -Value $payload["module_args"] | Out-Null
# dynamically create/load modules
ForEach ($mod in $payload.powershell_modules.GetEnumerator()) {
$decoded_module = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($mod.Value))
New-Module -ScriptBlock ([scriptblock]::Create($decoded_module)) -Name $mod.Key | Import-Module -WarningAction SilentlyContinue | Out-Null
}
$output = $entrypoint.Run($payload)
Write-Output $output
} # end exec_wrapper
Function Run($payload) {
# BEGIN Ansible.Async native type definition
$native_process_util = @"
using Microsoft.Win32.SafeHandles;
using System;
using System.ComponentModel;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading;
namespace Ansible.Async {
public static class NativeProcessUtil
{
[DllImport("kernel32.dll", SetLastError=true, CharSet=CharSet.Unicode, BestFitMapping=false)]
public static extern bool CreateProcess(
[MarshalAs(UnmanagedType.LPTStr)]
string lpApplicationName,
StringBuilder lpCommandLine,
IntPtr lpProcessAttributes,
IntPtr lpThreadAttributes,
bool bInheritHandles,
uint dwCreationFlags,
IntPtr lpEnvironment,
[MarshalAs(UnmanagedType.LPTStr)]
string lpCurrentDirectory,
STARTUPINFOEX lpStartupInfo,
out PROCESS_INFORMATION lpProcessInformation);
[DllImport("kernel32.dll", SetLastError=true, CharSet=CharSet.Unicode)]
public static extern uint SearchPath (
string lpPath,
string lpFileName,
string lpExtension,
int nBufferLength,
[MarshalAs (UnmanagedType.LPTStr)]
StringBuilder lpBuffer,
out IntPtr lpFilePart);
[DllImport("kernel32.dll")]
public static extern bool CreatePipe(out IntPtr hReadPipe, out IntPtr hWritePipe, SECURITY_ATTRIBUTES lpPipeAttributes, uint nSize);
[DllImport("kernel32.dll", SetLastError=true)]
public static extern IntPtr GetStdHandle(StandardHandleValues nStdHandle);
[DllImport("kernel32.dll", SetLastError=true)]
public static extern bool SetHandleInformation(IntPtr hObject, HandleFlags dwMask, int dwFlags);
[DllImport("kernel32.dll", SetLastError=true)]
public static extern bool InitializeProcThreadAttributeList(IntPtr lpAttributeList, int dwAttributeCount, int dwFlags, ref int lpSize);
[DllImport("kernel32.dll", SetLastError=true)]
public static extern bool UpdateProcThreadAttribute(
IntPtr lpAttributeList,
uint dwFlags,
IntPtr Attribute,
IntPtr lpValue,
IntPtr cbSize,
IntPtr lpPreviousValue,
IntPtr lpReturnSize);
public static string SearchPath(string findThis)
{
StringBuilder sbOut = new StringBuilder(1024);
IntPtr filePartOut;
if(SearchPath(null, findThis, null, sbOut.Capacity, sbOut, out filePartOut) == 0)
throw new FileNotFoundException("Couldn't locate " + findThis + " on path");
return sbOut.ToString();
}
[DllImport("kernel32.dll", SetLastError=true)]
static extern SafeFileHandle OpenThread(
ThreadAccessRights dwDesiredAccess,
bool bInheritHandle,
int dwThreadId);
[DllImport("kernel32.dll", SetLastError=true)]
static extern int ResumeThread(SafeHandle hThread);
public static void ResumeThreadById(int threadId)
{
var threadHandle = OpenThread(ThreadAccessRights.SUSPEND_RESUME, false, threadId);
if(threadHandle.IsInvalid)
throw new Exception(String.Format("Thread ID {0} is invalid ({1})", threadId,
new Win32Exception(Marshal.GetLastWin32Error()).Message));
try
{
if(ResumeThread(threadHandle) == -1)
throw new Exception(String.Format("Thread ID {0} cannot be resumed ({1})", threadId,
new Win32Exception(Marshal.GetLastWin32Error()).Message));
}
finally
{
threadHandle.Dispose();
}
}
public static void ResumeProcessById(int pid)
{
var proc = Process.GetProcessById(pid);
// wait for at least one suspended thread in the process (this handles possible slow startup race where
// primary thread of created-suspended process has not yet become runnable)
var retryCount = 0;
while(!proc.Threads.OfType<ProcessThread>().Any(t=>t.ThreadState == System.Diagnostics.ThreadState.Wait &&
t.WaitReason == ThreadWaitReason.Suspended))
{
proc.Refresh();
Thread.Sleep(50);
if (retryCount > 100)
throw new InvalidOperationException(String.Format("No threads were suspended in target PID {0} after 5s", pid));
}
foreach(var thread in proc.Threads.OfType<ProcessThread>().Where(t => t.ThreadState == System.Diagnostics.ThreadState.Wait &&
t.WaitReason == ThreadWaitReason.Suspended))
ResumeThreadById(thread.Id);
}
}
[StructLayout(LayoutKind.Sequential)]
public class SECURITY_ATTRIBUTES
{
public int nLength;
public IntPtr lpSecurityDescriptor;
public bool bInheritHandle = false;
public SECURITY_ATTRIBUTES() {
nLength = Marshal.SizeOf(this);
}
}
[StructLayout(LayoutKind.Sequential)]
public class STARTUPINFO
{
public Int32 cb;
public IntPtr lpReserved;
public IntPtr lpDesktop;
public IntPtr lpTitle;
public Int32 dwX;
public Int32 dwY;
public Int32 dwXSize;
public Int32 dwYSize;
public Int32 dwXCountChars;
public Int32 dwYCountChars;
public Int32 dwFillAttribute;
public Int32 dwFlags;
public Int16 wShowWindow;
public Int16 cbReserved2;
public IntPtr lpReserved2;
public IntPtr hStdInput;
public IntPtr hStdOutput;
public IntPtr hStdError;
public STARTUPINFO() {
cb = Marshal.SizeOf(this);
}
}
[StructLayout(LayoutKind.Sequential)]
public class STARTUPINFOEX {
public STARTUPINFO startupInfo;
public IntPtr lpAttributeList;
public STARTUPINFOEX() {
startupInfo = new STARTUPINFO();
startupInfo.cb = Marshal.SizeOf(this);
}
}
[StructLayout(LayoutKind.Sequential)]
public struct PROCESS_INFORMATION
{
public IntPtr hProcess;
public IntPtr hThread;
public int dwProcessId;
public int dwThreadId;
}
[Flags]
enum ThreadAccessRights : uint
{
SUSPEND_RESUME = 0x0002
}
[Flags]
public enum StartupInfoFlags : uint
{
USESTDHANDLES = 0x00000100
}
public enum StandardHandleValues : int
{
STD_INPUT_HANDLE = -10,
STD_OUTPUT_HANDLE = -11,
STD_ERROR_HANDLE = -12
}
[Flags]
public enum HandleFlags : uint
{
None = 0,
INHERIT = 1
}
}
"@ # END Ansible.Async native type definition
# calculate the result path so we can include it in the worker payload
$jid = $payload.async_jid
$local_jid = $jid + "." + $pid
$results_path = [System.IO.Path]::Combine($env:LOCALAPPDATA, ".ansible_async", $local_jid)
$payload.async_results_path = $results_path
[System.IO.Directory]::CreateDirectory([System.IO.Path]::GetDirectoryName($results_path)) | Out-Null
Add-Type -TypeDefinition $native_process_util -Debug:$false
# FUTURE: create under new job to ensure all children die on exit?
# FUTURE: move these flags into C# enum?
# start process suspended + breakaway so we can record the watchdog pid without worrying about a completion race
Set-Variable CREATE_BREAKAWAY_FROM_JOB -Value ([uint32]0x01000000) -Option Constant
Set-Variable CREATE_SUSPENDED -Value ([uint32]0x00000004) -Option Constant
Set-Variable CREATE_UNICODE_ENVIRONMENT -Value ([uint32]0x000000400) -Option Constant
Set-Variable CREATE_NEW_CONSOLE -Value ([uint32]0x00000010) -Option Constant
Set-Variable EXTENDED_STARTUPINFO_PRESENT -Value ([uint32]0x00080000) -Option Constant
$pstartup_flags = $CREATE_BREAKAWAY_FROM_JOB -bor $CREATE_UNICODE_ENVIRONMENT -bor $CREATE_NEW_CONSOLE `
-bor $CREATE_SUSPENDED -bor $EXTENDED_STARTUPINFO_PRESENT
# execute the dynamic watchdog as a breakaway process to free us from the WinRM job, which will in turn exec the module
$si = New-Object Ansible.Async.STARTUPINFOEX
# setup stdin redirection, we'll leave stdout/stderr as normal
$si.startupInfo.dwFlags = [Ansible.Async.StartupInfoFlags]::USESTDHANDLES
$si.startupInfo.hStdOutput = [Ansible.Async.NativeProcessUtil]::GetStdHandle([Ansible.Async.StandardHandleValues]::STD_OUTPUT_HANDLE)
$si.startupInfo.hStdError = [Ansible.Async.NativeProcessUtil]::GetStdHandle([Ansible.Async.StandardHandleValues]::STD_ERROR_HANDLE)
$stdin_read = $stdin_write = 0
$pipesec = New-Object Ansible.Async.SECURITY_ATTRIBUTES
$pipesec.bInheritHandle = $true
If(-not [Ansible.Async.NativeProcessUtil]::CreatePipe([ref]$stdin_read, [ref]$stdin_write, $pipesec, 0)) {
throw "Stdin pipe setup failed, Win32Error: $([System.Runtime.InteropServices.Marshal]::GetLastWin32Error())"
}
If(-not [Ansible.Async.NativeProcessUtil]::SetHandleInformation($stdin_write, [Ansible.Async.HandleFlags]::INHERIT, 0)) {
throw "Stdin handle setup failed, Win32Error: $([System.Runtime.InteropServices.Marshal]::GetLastWin32Error())"
}
$si.startupInfo.hStdInput = $stdin_read
# create an attribute list with our explicit handle inheritance list to pass to CreateProcess
[int]$buf_sz = 0
# determine the buffer size necessary for our attribute list
If(-not [Ansible.Async.NativeProcessUtil]::InitializeProcThreadAttributeList([IntPtr]::Zero, 1, 0, [ref]$buf_sz)) {
$last_err = [System.Runtime.InteropServices.Marshal]::GetLastWin32Error()
If($last_err -ne 122) { # ERROR_INSUFFICIENT_BUFFER
throw "Attribute list size query failed, Win32Error: $last_err"
}
}
$si.lpAttributeList = [System.Runtime.InteropServices.Marshal]::AllocHGlobal($buf_sz)
# initialize the attribute list
If(-not [Ansible.Async.NativeProcessUtil]::InitializeProcThreadAttributeList($si.lpAttributeList, 1, 0, [ref]$buf_sz)) {
throw "Attribute list init failed, Win32Error: $([System.Runtime.InteropServices.Marshal]::GetLastWin32Error())"
}
$handles_to_inherit = [IntPtr[]]@($stdin_read)
$pinned_handles = [System.Runtime.InteropServices.GCHandle]::Alloc($handles_to_inherit, [System.Runtime.InteropServices.GCHandleType]::Pinned)
# update the attribute list with the handles we want to inherit
If(-not [Ansible.Async.NativeProcessUtil]::UpdateProcThreadAttribute($si.lpAttributeList, 0, 0x20002 <# PROC_THREAD_ATTRIBUTE_HANDLE_LIST #>, `
$pinned_handles.AddrOfPinnedObject(), [System.Runtime.InteropServices.Marshal]::SizeOf([type][IntPtr]) * $handles_to_inherit.Length, `
[System.IntPtr]::Zero, [System.IntPtr]::Zero)) {
throw "Attribute list update failed, Win32Error: $([System.Runtime.InteropServices.Marshal]::GetLastWin32Error())"
}
# need to use a preamble-free version of UTF8Encoding
$utf8_encoding = New-Object System.Text.UTF8Encoding @($false)
$stdin_fs = New-Object System.IO.FileStream @($stdin_write, [System.IO.FileAccess]::Write, $true, 32768)
$stdin = New-Object System.IO.StreamWriter @($stdin_fs, $utf8_encoding, 32768)
$pi = New-Object Ansible.Async.PROCESS_INFORMATION
$encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($exec_wrapper.ToString()))
# FUTURE: direct cmdline CreateProcess path lookup fails- this works but is sub-optimal
$exec_cmd = [Ansible.Async.NativeProcessUtil]::SearchPath("powershell.exe")
$exec_args = New-Object System.Text.StringBuilder @("`"$exec_cmd`" -NonInteractive -NoProfile -ExecutionPolicy Bypass -EncodedCommand $encoded_command")
# TODO: use proper Win32Exception + error
If(-not [Ansible.Async.NativeProcessUtil]::CreateProcess($exec_cmd, $exec_args,
[IntPtr]::Zero, [IntPtr]::Zero, $true, $pstartup_flags, [IntPtr]::Zero, $env:windir, $si, [ref]$pi)) {
#throw New-Object System.ComponentModel.Win32Exception
throw "Worker creation failed, Win32Error: $([System.Runtime.InteropServices.Marshal]::GetLastWin32Error())"
}
# FUTURE: watch process for quick exit, capture stdout/stderr and return failure
$watchdog_pid = $pi.dwProcessId
[Ansible.Async.NativeProcessUtil]::ResumeProcessById($watchdog_pid)
# once process is resumed, we can send payload over stdin
$payload_string = $payload | ConvertTo-Json -Depth 99 -Compress
$stdin.WriteLine($payload_string)
$stdin.Close()
# populate the initial results file now (started, not finished) so async status polling and the watchdog have something to merge module output into
$result = @{
started=1;
finished=0;
results_file=$results_path;
ansible_job_id=$local_jid;
_ansible_suppress_tmpdir_delete=$true;
ansible_async_watchdog_pid=$watchdog_pid
}
$result_json = ConvertTo-Json $result
Set-Content $results_path -Value $result_json
return $result_json
}
''' # end async_wrapper
async_watchdog = br'''
Set-StrictMode -Version 2
$ErrorActionPreference = "Stop"
Add-Type -AssemblyName System.Web.Extensions
Function Log {
Param(
[string]$msg
)
If(Get-Variable -Name log_path -ErrorAction SilentlyContinue) {
Add-Content $log_path $msg
}
}
Function Deserialize-Json {
Param(
[Parameter(ValueFromPipeline=$true)]
[string]$json
)
# FUTURE: move this into module_utils/powershell.ps1 and use for everything (sidestep PSCustomObject issues)
# FUTURE: won't work w/ Nano Server/.NET Core- fallback to DataContractJsonSerializer (which can't handle dicts on .NET 4.0)
Log "Deserializing:`n$json"
$jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer
return $jss.DeserializeObject($json)
}
Function Write-Result {
Param(
[hashtable]$result,
[string]$resultfile_path
)
$result | ConvertTo-Json | Set-Content -Path $resultfile_path
}
Function Run($payload) {
$actions = $payload.actions
# pop 0th action as entrypoint
$entrypoint = $payload.($actions[0])
$entrypoint = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($entrypoint))
$payload.actions = $payload.actions[1..99]
$resultfile_path = $payload.async_results_path
$max_exec_time_sec = $payload.async_timeout_sec
Log "deserializing existing resultfile args"
# read in existing resultsfile to merge w/ module output (it should be written by the time we're unsuspended and running)
$result = Get-Content $resultfile_path -Raw | Deserialize-Json
Log "deserialized result is $($result | Out-String)"
Log "creating runspace"
$rs = [runspacefactory]::CreateRunspace()
$rs.Open()
Log "creating Powershell object"
$job = [powershell]::Create()
$job.Runspace = $rs
$job.AddScript($entrypoint) | Out-Null
$job.AddStatement().AddCommand("Run").AddArgument($payload) | Out-Null
Log "job BeginInvoke()"
$job_asyncresult = $job.BeginInvoke()
Log "waiting $max_exec_time_sec seconds for job to complete"
$signaled = $job_asyncresult.AsyncWaitHandle.WaitOne($max_exec_time_sec * 1000)
$result["finished"] = 1
If($job_asyncresult.IsCompleted) {
Log "job completed, calling EndInvoke()"
$job_output = $job.EndInvoke($job_asyncresult)
$job_error = $job.Streams.Error
Log "raw module stdout: \r\n$job_output"
If($job_error) {
Log "raw module stderr: \r\n$job_error"
}
# write success/output/error to result object
# TODO: cleanse leading/trailing junk
Try {
$module_result = Deserialize-Json $job_output
# TODO: check for conflicting keys
$result = $result + $module_result
}
Catch {
$excep = $_
$result.failed = $true
$result.msg = "failed to parse module output: $excep"
}
# TODO: determine success/fail, or always include stderr if nonempty?
Write-Result $result $resultfile_path
Log "wrote output to $resultfile_path"
}
Else {
$job.BeginStop($null, $null) | Out-Null # best effort stop
# write timeout to result object
$result.failed = $true
$result.msg = "timed out waiting for module completion"
Write-Result $result $resultfile_path
Log "wrote timeout to $resultfile_path"
}
# in the case of a hung pipeline, this will cause the process to stay alive until it's un-hung...
#$rs.Close() | Out-Null
}
''' # end async_watchdog
class ShellModule(object):
# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset()
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'
env = dict()
# We're being overly cautious about which keys to accept (more so than
# the Windows environment is capable of doing), since the powershell
# env provider's limitations don't appear to be documented.
safe_envkey = re.compile(r'^[\d\w_]{1,255}$')
# TODO: implement module transfer
# TODO: implement #Requires -Modules parser/locator
# TODO: add KEEP_REMOTE_FILES support + debug wrapper dump
# TODO: add binary module support
def assert_safe_env_key(self, key):
if not self.safe_envkey.match(key):
raise AnsibleError("Invalid PowerShell environment key: %s" % key)
return key
def safe_env_value(self, key, value):
if len(value) > 32767:
raise AnsibleError("PowerShell environment value for key '%s' exceeds 32767 characters in length" % key)
# powershell single quoted literals need single-quote doubling as their only escaping
value = value.replace("'", "''")
return to_text(value, errors='surrogate_or_strict')
def env_prefix(self, **kwargs):
# powershell/winrm env handling is handled in the exec wrapper
return ""
def join_path(self, *args):
parts = []
for arg in args:
arg = self._unquote(arg).replace('/', '\\')
parts.extend([a for a in arg.split('\\') if a])
path = '\\'.join(parts)
if path.startswith('~'):
return path
return '\'%s\'' % path
def get_remote_filename(self, pathname):
# powershell requires that script files end with .ps1
base_name = os.path.basename(pathname.strip())
name, ext = os.path.splitext(base_name.strip())
if ext.lower() not in ['.ps1', '.exe']:
return name + '.ps1'
return base_name.strip()
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
path = self._unquote(path)
return path.endswith('/') or path.endswith('\\')
def chmod(self, paths, mode):
raise NotImplementedError('chmod is not implemented for Powershell')
def chown(self, paths, user):
raise NotImplementedError('chown is not implemented for Powershell')
def set_user_facl(self, paths, user, mode):
raise NotImplementedError('set_user_facl is not implemented for Powershell')
def remove(self, path, recurse=False):
path = self._escape(self._unquote(path))
if recurse:
return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
else:
return self._encode_script('''Remove-Item "%s" -Force;''' % path)
def mkdtemp(self, basefile, system=False, mode=None, tmpdir=None):
basefile = self._escape(self._unquote(basefile))
# FIXME: Support system temp path and passed in tmpdir!
return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
def expand_user(self, user_home_path):
# PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
# not seem to work remotely, though by default we are always starting
# in the user's home directory.
user_home_path = self._unquote(user_home_path)
if user_home_path == '~':
script = 'Write-Host (Get-Location).Path'
elif user_home_path.startswith('~\\'):
script = 'Write-Host ((Get-Location).Path + "%s")' % self._escape(user_home_path[1:])
else:
script = 'Write-Host "%s"' % self._escape(user_home_path)
return self._encode_script(script)
def exists(self, path):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path "%s")
{
$res = 0;
}
Else
{
$res = 1;
}
Write-Host "$res";
Exit $res;
''' % path
return self._encode_script(script)
def checksum(self, path, *args, **kwargs):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "3";
}
Else
{
Write-Host "1";
}
''' % dict(path=path)
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
# pipelining bypass
if cmd == '':
return ''
# non-pipelining
cmd_parts = shlex.split(to_bytes(cmd), posix=False)
cmd_parts = map(to_text, cmd_parts)
if shebang and shebang.lower() == '#!powershell':
if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
cmd_parts.insert(0, '&')
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
elif not shebang:
# The module is assumed to be a binary
cmd_parts[0] = self._unquote(cmd_parts[0])
cmd_parts.append(arg_path)
script = '''
Try
{
%s
%s
}
Catch
{
$_obj = @{ failed = $true }
If ($_.Exception.GetType)
{
$_obj.Add('msg', $_.Exception.Message)
}
Else
{
$_obj.Add('msg', $_.ToString())
}
If ($_.InvocationInfo.PositionMessage)
{
$_obj.Add('exception', $_.InvocationInfo.PositionMessage)
}
ElseIf ($_.ScriptStackTrace)
{
$_obj.Add('exception', $_.ScriptStackTrace)
}
Try
{
$_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
}
Catch
{
}
Echo $_obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
''' % (env_string, ' '.join(cmd_parts))
if rm_tmp:
rm_tmp = self._escape(self._unquote(rm_tmp))
rm_cmd = 'Remove-Item "%s" -Force -Recurse -ErrorAction SilentlyContinue' % rm_tmp
script = '%s\nFinally { %s }' % (script, rm_cmd)
return self._encode_script(script, preserve_rc=False)
def wrap_for_exec(self, cmd):
return '& %s' % cmd
def _unquote(self, value):
'''Remove any matching quotes that wrap the given value.'''
value = to_text(value or '')
m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
if m:
return m.group(1)
m = re.match(r'^\s*?"(.*?)"\s*?$', value)
if m:
return m.group(1)
return value
def _escape(self, value, include_vars=False):
'''Return value escaped for use in PowerShell command.'''
# http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
# http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
('\'', '`\''), ('`', '``'), ('\x00', '`0')]
if include_vars:
subs.append(('$', '`$'))
pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
substs = [s for p, s in subs]
replace = lambda m: substs[m.lastindex - 1]
return re.sub(pattern, replace, value)
def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
'''Convert a PowerShell script to a single base64-encoded command.'''
script = to_text(script)
if strict_mode:
script = u'Set-StrictMode -Version Latest\r\n%s' % script
# try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
# NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
if preserve_rc:
script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n' % script
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = base64.b64encode(script.encode('utf-16-le'))
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
| gpl-3.0 |
incaser/project | project_sla/m2m.py | 25 | 1847 | """
Wrapper for OpenERP's cryptic write conventions for x2many fields.
Example usage:
import m2m
browse_rec.write({'many_ids': m2m.clear()})
browse_rec.write({'many_ids': m2m.link(99)})
browse_rec.write({'many_ids': m2m.add({'name': 'Monty'})})
browse_rec.write({'many_ids': m2m.replace([98, 99])})
Since the returned values are lists, they can be joined using the plus operator:
browse_rec.write({'many_ids': m2m.clear() + m2m.link(99)})
(Source: https://github.com/dreispt/openerp-write2many)
"""
def create(values):
""" Create a referenced record """
assert isinstance(values, dict)
return [(0, 0, values)]
def add(values):
""" Intuitive alias for create() """
return create(values)
def write(id, values):
""" Write on referenced record """
assert isinstance(id, int)
assert isinstance(values, dict)
return [(1, id, values)]
def remove(id):
""" Unlink and delete referenced record """
assert isinstance(id, int)
return [(2, id)]
def unlink(id):
""" Unlink but do not delete the referenced record """
assert isinstance(id, int)
return [(3, id)]
def link(id):
""" Link but do not delete the referenced record """
assert isinstance(id, int)
return [(4, id)]
def clear():
""" Unlink all referenced records (doesn't delete them) """
return [(5, 0)]
def replace(ids):
""" Unlink all current records and replace them with a new list """
assert isinstance(ids, list)
return [(6, 0, ids)]
if __name__ == "__main__":
# Tests:
assert create({'name': 'Monty'}) == [(0, 0, {'name': 'Monty'})]
assert write(99, {'name': 'Monty'}) == [(1, 99, {'name': 'Monty'})]
assert remove(99) == [(2, 99)]
assert unlink(99) == [(3, 99)]
assert clear() == [(5, 0)]
assert replace([97, 98, 99]) == [(6, 0, [97, 98, 99])]
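# Illustrative extra check (not part of the original tests): as the module
# docstring notes, the returned command lists can be concatenated, e.g.
#   assert clear() + link(99) == [(5, 0), (4, 99)]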
| agpl-3.0 |
joshbruning/selenium | py/selenium/webdriver/phantomjs/webdriver.py | 21 | 3111 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
class WebDriver(RemoteWebDriver):
"""
Wrapper to communicate with PhantomJS through Ghostdriver.
You will need to follow all the directions here:
https://github.com/detro/ghostdriver
"""
def __init__(self, executable_path="phantomjs",
port=0, desired_capabilities=DesiredCapabilities.PHANTOMJS,
service_args=None, service_log_path=None):
"""
Creates a new instance of the PhantomJS / Ghostdriver.
Starts the service and then creates new instance of the driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- service_args : A List of command line arguments to pass to PhantomJS
- service_log_path: Path for phantomjs service to log to.
"""
warnings.warn('Selenium support for PhantomJS has been deprecated, please use headless '
'versions of Chrome or Firefox instead')
self.service = Service(
executable_path,
port=port,
service_args=service_args,
log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(
self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities)
except Exception:
self.quit()
raise
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the PhantomJS executable
that is started when starting the PhantomJS
"""
try:
RemoteWebDriver.quit(self)
except Exception:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
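# Illustrative usage sketch (not part of the original module; assumes a
# phantomjs binary is available on PATH):
#
#   from selenium import webdriver
#   driver = webdriver.PhantomJS(service_log_path='/tmp/ghostdriver.log')
#   try:
#       driver.get('https://example.com')
#       print(driver.title)
#   finally:
#       driver.quit()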
| apache-2.0 |
tymiles003/openwebrtc | owr/symbols_to_source.py | 32 | 2437 | """
Copyright (c) 2014-2015, Ericsson AB. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
"""
def symbols_to_source(infile_name, outfile_name, platform):
with open(infile_name) as infile, open(outfile_name, "w") as outfile:
symbols = []
for line in infile:
split = line.split(' if ')
if not split[1:] or platform in [p.strip() for p in split[1].split(' or ')]:
symbols.append(split[0].strip())
outfile.write("#include <stdlib.h>\n")
outfile.write("#include <stdint.h>\n")
outfile.writelines(["extern void *%s;\n" % symbol for symbol in symbols])
outfile.write("\nvoid *_%s(void)\n{\n " % outfile_name.split(".")[0])
outfile.write("uintptr_t ret = 0;\n ")
lines = ["ret |= (uintptr_t) %s" % symbol for symbol in symbols]
outfile.writelines(";\n ".join(lines))
outfile.write(";\n ")
outfile.write("return (void *) ret;\n}\n\n")
if __name__ == "__main__":
import sys
if (len(sys.argv) < 4):
print "Usage: %s <infile> <outfile> <platform>" % sys.argv[0]
exit(1)
symbols_to_source(sys.argv[1], sys.argv[2], sys.argv[3])
| bsd-2-clause |
RanadeepPolavarapu/kuma | vendor/packages/logilab/common/test/unittest_umessage.py | 7 | 2484 | # encoding: iso-8859-15
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
import sys
import email
from os.path import join, dirname, abspath
from six import text_type
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.umessage import UMessage, decode_QP
DATA = join(dirname(abspath(__file__)), 'data')
class UMessageTC(TestCase):
def setUp(self):
if sys.version_info >= (3, 2):
import io
msg1 = email.message_from_file(io.open(join(DATA, 'test1.msg'), encoding='utf8'))
msg2 = email.message_from_file(io.open(join(DATA, 'test2.msg'), encoding='utf8'))
else:
msg1 = email.message_from_file(open(join(DATA, 'test1.msg')))
msg2 = email.message_from_file(open(join(DATA, 'test2.msg')))
self.umessage1 = UMessage(msg1)
self.umessage2 = UMessage(msg2)
def test_get_subject(self):
subj = self.umessage2.get('Subject')
self.assertEqual(type(subj), text_type)
self.assertEqual(subj, u'À LA MER')
def test_get_all(self):
to = self.umessage2.get_all('To')
self.assertEqual(type(to[0]), text_type)
self.assertEqual(to, [u'élément à accents <[email protected]>'])
def test_get_payload_no_multi(self):
payload = self.umessage1.get_payload()
self.assertEqual(type(payload), text_type)
def test_decode_QP(self):
test_line = '=??b?UmFwaGHrbA==?= DUPONT<[email protected]>'
test = decode_QP(test_line)
self.assertEqual(type(test), text_type)
self.assertEqual(test, u'Raphaël DUPONT<[email protected]>')
if __name__ == '__main__':
unittest_main()
| mpl-2.0 |
aspectron/jsx | extern/v8/tools/testrunner/local/statusfile.py | 41 | 4409 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
FAIL = "FAIL"
PASS = "PASS"
OKAY = "OKAY"
TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
FLAKY = "FLAKY"
NO_VARIANTS = "NO_VARIANTS"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
NO_VARIANTS, PASS_OR_FAIL, ALWAYS]:
KEYWORDS[key] = key
DEFS = {FAIL_OK: [FAIL, OKAY],
PASS_OR_FAIL: [PASS, FAIL]}
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
"nacl_x64", "macos", "windows", "linux"]:
VARIABLES[var] = var
def DoSkip(outcomes):
return SKIP in outcomes
def IsSlow(outcomes):
return SLOW in outcomes
def OnlyStandardVariant(outcomes):
return NO_VARIANTS in outcomes
def IsFlaky(outcomes):
return FLAKY in outcomes
def IsPassOrFail(outcomes):
return ((PASS in outcomes) and (FAIL in outcomes) and
(not CRASH in outcomes) and (not OKAY in outcomes))
def IsFailOk(outcomes):
return (FAIL in outcomes) and (OKAY in outcomes)
def _AddOutcome(result, new):
global DEFS
if new in DEFS:
mapped = DEFS[new]
if type(mapped) == list:
for m in mapped:
_AddOutcome(result, m)
elif type(mapped) == str:
_AddOutcome(result, mapped)
else:
result.add(new)
def _ParseOutcomeList(rule, outcomes, target_dict, variables):
result = set([])
if type(outcomes) == str:
outcomes = [outcomes]
for item in outcomes:
if type(item) == str:
_AddOutcome(result, item)
elif type(item) == list:
if not eval(item[0], variables): continue
for outcome in item[1:]:
assert type(outcome) == str
_AddOutcome(result, outcome)
else:
assert False
if len(result) == 0: return
if rule in target_dict:
target_dict[rule] |= result
else:
target_dict[rule] = result
def ReadStatusFile(path, variables):
with open(path) as f:
global KEYWORDS
contents = eval(f.read(), KEYWORDS)
rules = {}
wildcards = {}
variables.update(VARIABLES)
for section in contents:
assert type(section) == list
assert len(section) == 2
if not eval(section[0], variables): continue
section = section[1]
assert type(section) == dict
for rule in section:
assert type(rule) == str
if rule[-1] == '*':
_ParseOutcomeList(rule, section[rule], wildcards, variables)
else:
_ParseOutcomeList(rule, section[rule], rules, variables)
return rules, wildcards
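# Hypothetical sketch (not taken from V8) of the structure ReadStatusFile()
# expects: a Python literal list of [condition, {rule: outcomes}] sections, e.g.
#
#   [
#   [ALWAYS, {
#     'my-test': [PASS, SLOW],
#     'my-suite/*': [SKIP],
#   }],
#   ['system == windows', {
#     'flaky-test': [PASS, FAIL],
#   }],
#   ]
#
# where bare names like ALWAYS/PASS resolve via KEYWORDS and the condition
# strings are evaluated against the caller-supplied variables.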
| mit |
jason-weirather/py-seq-tools | seqtools/statistics/__init__.py | 1 | 2084 | """This module contains many list-based functions to calculate descriptive statistics."""
import sys
from math import sqrt
from collections import Counter
def mode(arr):
"""get the most frequent value"""
return max(set(arr),key=arr.count)
def average(arr):
"""average of the values, must have more than 0 entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: average
:rtype: float
"""
if len(arr) == 0:
sys.stderr.write("ERROR: no content in array to take average\n")
sys.exit()
if len(arr) == 1: return arr[0]
return float(sum(arr))/float(len(arr))
def median(arr):
"""median of the values, must have more than 0 entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: median
:rtype: float
"""
if len(arr) == 0:
sys.stderr.write("ERROR: no content in array to take average\n")
sys.exit()
if len(arr) == 1: return arr[0]
quot = len(arr)/2
rem = len(arr)%2
if rem != 0:
return sorted(arr)[quot]
return float(sum(sorted(arr)[quot-1:quot+1]))/float(2)
def standard_deviation(arr):
"""standard deviation of the values, must have 2 or more entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: standard deviation
:rtype: float
"""
return sqrt(variance(arr))
def variance(arr):
"""variance of the values, must have 2 or more entries.
:param arr: list of numbers
:type arr: number[] a number array
:return: variance
:rtype: float
"""
avg = average(arr)
return sum([(float(x)-avg)**2 for x in arr])/float(len(arr)-1)
def N50(arr):
"""N50 often used in assessing denovo assembly.
:param arr: list of numbers
:type arr: number[] a number array
:return: N50
:rtype: float
"""
if len(arr) == 0:
sys.stderr.write("ERROR: no content in array to take N50\n")
sys.exit()
tot = sum(arr)
half = float(tot)/float(2)
cummulative = 0
for l in sorted(arr):
cummulative += l
if float(cummulative) > half:
return l
sys.stderr.write("ERROR: problem finding M50\n")
sys.exit()
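# Illustrative usage sketch (not part of the original module):
#
#   from seqtools.statistics import average, median, standard_deviation, N50
#   lengths = [100, 200, 300, 400, 1000]
#   print(average(lengths), median(lengths), standard_deviation(lengths), N50(lengths))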
| apache-2.0 |
tommo/gii | support/waf/waflib/Tools/c_osx.py | 10 | 5517 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2008-2010
"""
MacOSX related tools
"""
import os, shutil, sys, platform
from waflib import TaskGen, Task, Build, Options, Utils, Errors
from waflib.TaskGen import taskgen_method, feature, after_method, before_method
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>%s</string>
</dict>
</plist>
'''
"""
plist template
"""
@feature('c', 'cxx')
def set_macosx_deployment_target(self):
"""
	see WAF issue 285 and also http://trac.macports.org/ticket/17059
"""
if self.env['MACOSX_DEPLOYMENT_TARGET']:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env['MACOSX_DEPLOYMENT_TARGET']
elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
if Utils.unversioned_sys_platform() == 'darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@taskgen_method
def create_bundle_dirs(self, name, out):
"""
Create bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp`
"""
bld = self.bld
dir = out.parent.find_or_declare(name)
dir.mkdir()
macos = dir.find_or_declare(['Contents', 'MacOS'])
macos.mkdir()
return dir
def bundle_name_for_output(out):
name = out.name
k = name.rfind('.')
if k >= 0:
name = name[:k] + '.app'
else:
name = name + '.app'
return name
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macapp(self):
"""
To compile an executable into a Mac application (a .app), set its *mac_app* attribute::
def build(bld):
bld.shlib(source='a.c', target='foo', mac_app = True)
To force *all* executables to be transformed into Mac applications::
def build(bld):
bld.env.MACAPP = True
bld.shlib(source='a.c', target='foo')
"""
if self.env['MACAPP'] or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
self.apptask = self.create_task('macapp', self.link_task.outputs, n1)
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name
self.bld.install_files(inst_to, n1, chmod=Utils.O755)
if getattr(self, 'mac_resources', None):
res_dir = n1.parent.parent.make_node('Resources')
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name
for x in self.to_list(self.mac_resources):
node = self.path.find_node(x)
if not node:
raise Errors.WafError('Missing mac_resource %r in %r' % (x, self))
parent = node.parent
if os.path.isdir(node.abspath()):
nodes = node.ant_glob('**')
else:
nodes = [node]
for node in nodes:
rel = node.path_from(parent)
tsk = self.create_task('macapp', node, res_dir.make_node(rel))
self.bld.install_as(inst_to + '/%s' % rel, node)
if getattr(self.bld, 'is_install', None):
# disable the normal binary installation
self.install_task.hasrun = Task.SKIP_ME
@feature('cprogram', 'cxxprogram')
@after_method('apply_link')
def create_task_macplist(self):
"""
Create a :py:class:`waflib.Tools.c_osx.macplist` instance.
"""
if self.env['MACAPP'] or getattr(self, 'mac_app', False):
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'Info.plist'])
self.plisttask = plisttask = self.create_task('macplist', [], n1)
if getattr(self, 'mac_plist', False):
node = self.path.find_resource(self.mac_plist)
if node:
plisttask.inputs.append(node)
else:
plisttask.code = self.mac_plist
else:
plisttask.code = app_info % self.link_task.outputs[0].name
inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name
self.bld.install_files(inst_to, n1)
@feature('cshlib', 'cxxshlib')
@before_method('apply_link', 'propagate_uselib_vars')
def apply_bundle(self):
"""
To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute::
def build(bld):
bld.shlib(source='a.c', target='foo', mac_bundle = True)
To force *all* executables to be transformed into bundles::
def build(bld):
bld.env.MACBUNDLE = True
bld.shlib(source='a.c', target='foo')
"""
if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
self.env['LINKFLAGS_cshlib'] = self.env['LINKFLAGS_cxxshlib'] = [] # disable the '-dynamiclib' flag
self.env['cshlib_PATTERN'] = self.env['cxxshlib_PATTERN'] = self.env['macbundle_PATTERN']
use = self.use = self.to_list(getattr(self, 'use', []))
if not 'MACBUNDLE' in use:
use.append('MACBUNDLE')
app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']
class macapp(Task.Task):
"""
Create mac applications
"""
color = 'PINK'
def run(self):
self.outputs[0].parent.mkdir()
shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath())
class macplist(Task.Task):
"""
Create plist files
"""
color = 'PINK'
ext_in = ['.bin']
def run(self):
if getattr(self, 'code', None):
txt = self.code
else:
txt = self.inputs[0].read()
self.outputs[0].write(txt)
| mit |
jaggu303619/asylum | openerp/addons/google_base_account/google_base_account.py | 53 | 1297 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_users(osv.osv):
_inherit = "res.users"
_columns = {
'gmail_user': fields.char('Username', size=64,),
'gmail_password': fields.char('Password', size=64),
}
res_users()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
justathoughtor2/atomicApe | cygwin/lib/python2.7/site-packages/docutils/utils/math/latex2mathml.py | 104 | 17213 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# :Id: $Id: latex2mathml.py 7668 2013-06-04 12:46:30Z milde $
# :Copyright: © 2010 Günter Milde.
# Based on rst2mathml.py from the latex_math sandbox project
# © 2005 Jens Jørgen Mortensen
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""Convert LaTex math code into presentational MathML"""
# Based on the `latex_math` sandbox project by Jens Jørgen Mortensen
import docutils.utils.math.tex2unichar as tex2unichar
# TeX spacing combining
over = {'acute': u'\u00B4', # u'\u0301',
'bar': u'\u00AF', # u'\u0304',
'breve': u'\u02D8', # u'\u0306',
'check': u'\u02C7', # u'\u030C',
'dot': u'\u02D9', # u'\u0307',
'ddot': u'\u00A8', # u'\u0308',
'dddot': u'\u20DB',
'grave': u'`', # u'\u0300',
'hat': u'^', # u'\u0302',
'mathring': u'\u02DA', # u'\u030A',
'overleftrightarrow': u'\u20e1',
# 'overline': # u'\u0305',
'tilde': u'\u02DC', # u'\u0303',
'vec': u'\u20D7'}
Greek = { # Capital Greek letters: (upright in TeX style)
'Phi':u'\u03a6', 'Xi':u'\u039e', 'Sigma':u'\u03a3',
'Psi':u'\u03a8', 'Delta':u'\u0394', 'Theta':u'\u0398',
'Upsilon':u'\u03d2', 'Pi':u'\u03a0', 'Omega':u'\u03a9',
'Gamma':u'\u0393', 'Lambda':u'\u039b'}
letters = tex2unichar.mathalpha
special = tex2unichar.mathbin # Binary symbols
special.update(tex2unichar.mathrel) # Relation symbols, arrow symbols
special.update(tex2unichar.mathord) # Miscellaneous symbols
special.update(tex2unichar.mathop) # Variable-sized symbols
special.update(tex2unichar.mathopen) # Braces
special.update(tex2unichar.mathclose) # Braces
special.update(tex2unichar.mathfence)
sumintprod = ''.join([special[symbol] for symbol in
['sum', 'int', 'oint', 'prod']])
functions = ['arccos', 'arcsin', 'arctan', 'arg', 'cos', 'cosh',
'cot', 'coth', 'csc', 'deg', 'det', 'dim',
'exp', 'gcd', 'hom', 'inf', 'ker', 'lg',
'lim', 'liminf', 'limsup', 'ln', 'log', 'max',
'min', 'Pr', 'sec', 'sin', 'sinh', 'sup',
'tan', 'tanh',
'injlim', 'varinjlim', 'varlimsup',
'projlim', 'varliminf', 'varprojlim']
mathbb = {
'A': u'\U0001D538',
'B': u'\U0001D539',
'C': u'\u2102',
'D': u'\U0001D53B',
'E': u'\U0001D53C',
'F': u'\U0001D53D',
'G': u'\U0001D53E',
'H': u'\u210D',
'I': u'\U0001D540',
'J': u'\U0001D541',
'K': u'\U0001D542',
'L': u'\U0001D543',
'M': u'\U0001D544',
'N': u'\u2115',
'O': u'\U0001D546',
'P': u'\u2119',
'Q': u'\u211A',
'R': u'\u211D',
'S': u'\U0001D54A',
'T': u'\U0001D54B',
'U': u'\U0001D54C',
'V': u'\U0001D54D',
'W': u'\U0001D54E',
'X': u'\U0001D54F',
'Y': u'\U0001D550',
'Z': u'\u2124',
}
mathscr = {
'A': u'\U0001D49C',
'B': u'\u212C', # bernoulli function
'C': u'\U0001D49E',
'D': u'\U0001D49F',
'E': u'\u2130',
'F': u'\u2131',
'G': u'\U0001D4A2',
'H': u'\u210B', # hamiltonian
'I': u'\u2110',
'J': u'\U0001D4A5',
'K': u'\U0001D4A6',
'L': u'\u2112', # lagrangian
'M': u'\u2133', # physics m-matrix
'N': u'\U0001D4A9',
'O': u'\U0001D4AA',
'P': u'\U0001D4AB',
'Q': u'\U0001D4AC',
'R': u'\u211B',
'S': u'\U0001D4AE',
'T': u'\U0001D4AF',
'U': u'\U0001D4B0',
'V': u'\U0001D4B1',
'W': u'\U0001D4B2',
'X': u'\U0001D4B3',
'Y': u'\U0001D4B4',
'Z': u'\U0001D4B5',
'a': u'\U0001D4B6',
'b': u'\U0001D4B7',
'c': u'\U0001D4B8',
'd': u'\U0001D4B9',
'e': u'\u212F',
'f': u'\U0001D4BB',
'g': u'\u210A',
'h': u'\U0001D4BD',
'i': u'\U0001D4BE',
'j': u'\U0001D4BF',
'k': u'\U0001D4C0',
'l': u'\U0001D4C1',
'm': u'\U0001D4C2',
'n': u'\U0001D4C3',
'o': u'\u2134', # order of
'p': u'\U0001D4C5',
'q': u'\U0001D4C6',
'r': u'\U0001D4C7',
's': u'\U0001D4C8',
't': u'\U0001D4C9',
'u': u'\U0001D4CA',
'v': u'\U0001D4CB',
'w': u'\U0001D4CC',
'x': u'\U0001D4CD',
'y': u'\U0001D4CE',
'z': u'\U0001D4CF',
}
negatables = {'=': u'\u2260',
'\in': u'\u2209',
'\equiv': u'\u2262'}
# LaTeX to MathML translation stuff:
class math:
"""Base class for MathML elements."""
nchildren = 1000000
"""Required number of children"""
def __init__(self, children=None, inline=None):
"""math([children]) -> MathML element
children can be one child or a list of children."""
self.children = []
if children is not None:
if type(children) is list:
for child in children:
self.append(child)
else:
# Only one child:
self.append(children)
if inline is not None:
self.inline = inline
def __repr__(self):
if hasattr(self, 'children'):
return self.__class__.__name__ + '(%s)' % \
','.join([repr(child) for child in self.children])
else:
return self.__class__.__name__
def full(self):
"""Room for more children?"""
return len(self.children) >= self.nchildren
def append(self, child):
"""append(child) -> element
Appends child and returns self if self is not full or first
non-full parent."""
assert not self.full()
self.children.append(child)
child.parent = self
node = self
while node.full():
node = node.parent
return node
def delete_child(self):
"""delete_child() -> child
Delete last child and return it."""
child = self.children[-1]
del self.children[-1]
return child
def close(self):
"""close() -> parent
Close element and return first non-full element."""
parent = self.parent
while parent.full():
parent = parent.parent
return parent
def xml(self):
"""xml() -> xml-string"""
return self.xml_start() + self.xml_body() + self.xml_end()
def xml_start(self):
if not hasattr(self, 'inline'):
return ['<%s>' % self.__class__.__name__]
xmlns = 'http://www.w3.org/1998/Math/MathML'
if self.inline:
return ['<math xmlns="%s">' % xmlns]
else:
return ['<math xmlns="%s" mode="display">' % xmlns]
def xml_end(self):
return ['</%s>' % self.__class__.__name__]
def xml_body(self):
xml = []
for child in self.children:
xml.extend(child.xml())
return xml
class mrow(math):
def xml_start(self):
return ['\n<%s>' % self.__class__.__name__]
class mtable(math):
def xml_start(self):
return ['\n<%s>' % self.__class__.__name__]
class mtr(mrow): pass
class mtd(mrow): pass
class mx(math):
"""Base class for mo, mi, and mn"""
nchildren = 0
def __init__(self, data):
self.data = data
def xml_body(self):
return [self.data]
class mo(mx):
translation = {'<': '<', '>': '>'}
def xml_body(self):
return [self.translation.get(self.data, self.data)]
class mi(mx): pass
class mn(mx): pass
class msub(math):
nchildren = 2
class msup(math):
nchildren = 2
class msqrt(math):
nchildren = 1
class mroot(math):
nchildren = 2
class mfrac(math):
nchildren = 2
class msubsup(math):
nchildren = 3
def __init__(self, children=None, reversed=False):
self.reversed = reversed
math.__init__(self, children)
def xml(self):
if self.reversed:
## self.children[1:3] = self.children[2:0:-1]
self.children[1:3] = [self.children[2], self.children[1]]
self.reversed = False
return math.xml(self)
class mfenced(math):
translation = {'\\{': '{', '\\langle': u'\u2329',
'\\}': '}', '\\rangle': u'\u232A',
'.': ''}
def __init__(self, par):
self.openpar = par
math.__init__(self)
def xml_start(self):
open = self.translation.get(self.openpar, self.openpar)
close = self.translation.get(self.closepar, self.closepar)
return ['<mfenced open="%s" close="%s">' % (open, close)]
class mspace(math):
nchildren = 0
class mstyle(math):
def __init__(self, children=None, nchildren=None, **kwargs):
if nchildren is not None:
self.nchildren = nchildren
math.__init__(self, children)
self.attrs = kwargs
def xml_start(self):
return ['<mstyle '] + ['%s="%s"' % item
for item in self.attrs.items()] + ['>']
class mover(math):
nchildren = 2
def __init__(self, children=None, reversed=False):
self.reversed = reversed
math.__init__(self, children)
def xml(self):
if self.reversed:
self.children.reverse()
self.reversed = False
return math.xml(self)
class munder(math):
nchildren = 2
class munderover(math):
nchildren = 3
def __init__(self, children=None):
math.__init__(self, children)
class mtext(math):
nchildren = 0
def __init__(self, text):
self.text = text
def xml_body(self):
return [self.text]
def parse_latex_math(string, inline=True):
"""parse_latex_math(string [,inline]) -> MathML-tree
Returns a MathML-tree parsed from string. inline=True is for
inline math and inline=False is for displayed math.
tree is the whole tree and node is the current element."""
# Normalize white-space:
string = ' '.join(string.split())
if inline:
node = mrow()
tree = math(node, inline=True)
else:
node = mtd()
tree = math(mtable(mtr(node)), inline=False)
while len(string) > 0:
n = len(string)
c = string[0]
skip = 1 # number of characters consumed
if n > 1:
c2 = string[1]
else:
c2 = ''
## print n, string, c, c2, node.__class__.__name__
if c == ' ':
pass
elif c == '\\':
if c2 in '{}':
node = node.append(mo(c2))
skip = 2
elif c2 == ' ':
node = node.append(mspace())
skip = 2
elif c2 == ',': # TODO: small space
node = node.append(mspace())
skip = 2
elif c2.isalpha():
# We have a LaTeX-name:
i = 2
while i < n and string[i].isalpha():
i += 1
name = string[1:i]
node, skip = handle_keyword(name, node, string[i:])
skip += i
elif c2 == '\\':
# End of a row:
entry = mtd()
row = mtr(entry)
node.close().close().append(row)
node = entry
skip = 2
else:
raise SyntaxError(ur'Syntax error: "%s%s"' % (c, c2))
elif c.isalpha():
node = node.append(mi(c))
elif c.isdigit():
node = node.append(mn(c))
elif c in "+-*/=()[]|<>,.!?':;@":
node = node.append(mo(c))
elif c == '_':
child = node.delete_child()
if isinstance(child, msup):
sub = msubsup(child.children, reversed=True)
elif isinstance(child, mo) and child.data in sumintprod:
sub = munder(child)
else:
sub = msub(child)
node.append(sub)
node = sub
elif c == '^':
child = node.delete_child()
if isinstance(child, msub):
sup = msubsup(child.children)
elif isinstance(child, mo) and child.data in sumintprod:
sup = mover(child)
elif (isinstance(child, munder) and
child.children[0].data in sumintprod):
sup = munderover(child.children)
else:
sup = msup(child)
node.append(sup)
node = sup
elif c == '{':
row = mrow()
node.append(row)
node = row
elif c == '}':
node = node.close()
elif c == '&':
entry = mtd()
node.close().append(entry)
node = entry
else:
raise SyntaxError(ur'Illegal character: "%s"' % c)
string = string[skip:]
return tree
def handle_keyword(name, node, string):
skip = 0
if len(string) > 0 and string[0] == ' ':
string = string[1:]
skip = 1
if name == 'begin':
if not string.startswith('{matrix}'):
raise SyntaxError(u'Environment not supported! '
u'Supported environment: "matrix".')
skip += 8
entry = mtd()
table = mtable(mtr(entry))
node.append(table)
node = entry
elif name == 'end':
if not string.startswith('{matrix}'):
raise SyntaxError(ur'Expected "\end{matrix}"!')
skip += 8
node = node.close().close().close()
elif name in ('text', 'mathrm'):
if string[0] != '{':
raise SyntaxError(ur'Expected "\text{...}"!')
i = string.find('}')
if i == -1:
raise SyntaxError(ur'Expected "\text{...}"!')
node = node.append(mtext(string[1:i]))
skip += i + 1
elif name == 'sqrt':
sqrt = msqrt()
node.append(sqrt)
node = sqrt
elif name == 'frac':
frac = mfrac()
node.append(frac)
node = frac
elif name == 'left':
for par in ['(', '[', '|', '\\{', '\\langle', '.']:
if string.startswith(par):
break
else:
raise SyntaxError(u'Missing left-brace!')
fenced = mfenced(par)
node.append(fenced)
row = mrow()
fenced.append(row)
node = row
skip += len(par)
elif name == 'right':
for par in [')', ']', '|', '\\}', '\\rangle', '.']:
if string.startswith(par):
break
else:
raise SyntaxError(u'Missing right-brace!')
node = node.close()
node.closepar = par
node = node.close()
skip += len(par)
elif name == 'not':
for operator in negatables:
if string.startswith(operator):
break
else:
raise SyntaxError(ur'Expected something to negate: "\not ..."!')
node = node.append(mo(negatables[operator]))
skip += len(operator)
elif name == 'mathbf':
style = mstyle(nchildren=1, fontweight='bold')
node.append(style)
node = style
elif name == 'mathbb':
if string[0] != '{' or not string[1].isupper() or string[2] != '}':
raise SyntaxError(ur'Expected something like "\mathbb{A}"!')
node = node.append(mi(mathbb[string[1]]))
skip += 3
elif name in ('mathscr', 'mathcal'):
if string[0] != '{' or string[2] != '}':
raise SyntaxError(ur'Expected something like "\mathscr{A}"!')
node = node.append(mi(mathscr[string[1]]))
skip += 3
elif name == 'colon': # "normal" colon, not binary operator
node = node.append(mo(':')) # TODO: add ``lspace="0pt"``
elif name in Greek: # Greek capitals (upright in "TeX style")
node = node.append(mo(Greek[name]))
# TODO: "ISO style" sets them italic. Could we use a class argument
# to enable styling via CSS?
elif name in letters:
node = node.append(mi(letters[name]))
elif name in special:
node = node.append(mo(special[name]))
elif name in functions:
node = node.append(mo(name))
elif name in over:
ovr = mover(mo(over[name]), reversed=True)
node.append(ovr)
node = ovr
else:
raise SyntaxError(u'Unknown LaTeX command: ' + name)
return node, skip
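# Illustrative usage sketch (not part of the original module):
#
#   tree = parse_latex_math(r'\frac{\alpha}{2} + \sqrt{x}', inline=True)
#   print(''.join(tree.xml()))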
| gpl-3.0 |
xianjunzhengbackup/Cloud-Native-Python | env/lib/python3.6/site-packages/setuptools/command/py36compat.py | 286 | 4986 | import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from setuptools.extern.six.moves import filter
class sdist_add_defaults:
"""
Mix-in providing forward-compatibility for functionality as found in
distutils on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils. Instead, override in the subclass.
"""
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist_add_defaults._cs_path_exists(__file__)
True
>>> sdist_add_defaults._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
def _add_defaults_optional(self):
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
def _add_defaults_c_libs(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
def _add_defaults_scripts(self):
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
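# Illustrative sketch (not part of the original module): setuptools combines
# this mix-in with the distutils command, roughly
#
#   from distutils.command import sdist as orig
#   class sdist(sdist_add_defaults, orig.sdist):
#       ...
#
# so the methods above are only used when distutils itself lacks them.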
if hasattr(sdist.sdist, '_add_defaults_standards'):
# disable the functionality already available upstream
class sdist_add_defaults:
pass
| mit |