repo_name (string, length 5-100) | path (string, length 4-375) | copies (string, 991 classes) | size (string, length 4-7) | content (string, length 666-1M) | license (string, 15 classes) |
---|---|---|---|---|---|
jcoady9/python-for-android | python-modules/twisted/twisted/spread/publish.py | 61 | 4465 | # -*- test-case-name: twisted.test.test_pb -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Persistently cached objects for PB.
Maintainer: Glyph Lefkowitz
Future Plans: None known.
"""
import time
from twisted.internet import defer
from twisted.spread import banana, jelly, flavors
class Publishable(flavors.Cacheable):
"""An object whose cached state persists across sessions.
"""
def __init__(self, publishedID):
self.republish()
self.publishedID = publishedID
def republish(self):
"""Set the timestamp to current and (TODO) update all observers.
"""
self.timestamp = time.time()
def view_getStateToPublish(self, perspective):
'(internal)'
return self.getStateToPublishFor(perspective)
def getStateToPublishFor(self, perspective):
"""Implement me to special-case your state for a perspective.
"""
return self.getStateToPublish()
def getStateToPublish(self):
"""Implement me to return state to copy as part of the publish phase.
"""
raise NotImplementedError("%s.getStateToPublishFor" % self.__class__)
def getStateToCacheAndObserveFor(self, perspective, observer):
"""Get all necessary metadata to keep a clientside cache.
"""
if perspective:
pname = perspective.perspectiveName
sname = perspective.getService().serviceName
else:
pname = "None"
sname = "None"
return {"remote": flavors.ViewPoint(perspective, self),
"publishedID": self.publishedID,
"perspective": pname,
"service": sname,
"timestamp": self.timestamp}
class RemotePublished(flavors.RemoteCache):
"""The local representation of remote Publishable object.
"""
isActivated = 0
_wasCleanWhenLoaded = 0
def getFileName(self, ext='pub'):
return ("%s-%s-%s.%s" %
(self.service, self.perspective, str(self.publishedID), ext))
def setCopyableState(self, state):
self.__dict__.update(state)
self._activationListeners = []
try:
dataFile = file(self.getFileName(), "rb")
data = dataFile.read()
dataFile.close()
except IOError:
recent = 0
else:
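# a cached copy was read from disk; it is only reusable if its timestamp matches the server's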
newself = jelly.unjelly(banana.decode(data))
recent = (newself.timestamp == self.timestamp)
if recent:
self._cbGotUpdate(newself.__dict__)
self._wasCleanWhenLoaded = 1
else:
self.remote.callRemote('getStateToPublish').addCallbacks(self._cbGotUpdate)
def __getstate__(self):
other = self.__dict__.copy()
# Remove PB-specific attributes
del other['broker']
del other['remote']
del other['luid']
# remove my own runtime-tracking stuff
del other['_activationListeners']
del other['isActivated']
return other
def _cbGotUpdate(self, newState):
self.__dict__.update(newState)
self.isActivated = 1
# send out notifications
for listener in self._activationListeners:
listener(self)
self._activationListeners = []
self.activated()
dataFile = file(self.getFileName(), "wb")
dataFile.write(banana.encode(jelly.jelly(self)))
dataFile.close()
def activated(self):
"""Implement this method if you want to be notified when your
publishable subclass is activated.
"""
def callWhenActivated(self, callback):
"""Externally register for notification when this publishable has received all relevant data.
"""
if self.isActivated:
callback(self)
else:
self._activationListeners.append(callback)
def whenReady(d):
"""
Wrap a deferred returned from a pb method in another deferred that
expects a RemotePublished as a result. This will allow you to wait until
the result is really available.
Idiomatic usage would look like::
publish.whenReady(serverObject.getMeAPublishable()).addCallback(lookAtThePublishable)
"""
d2 = defer.Deferred()
d.addCallbacks(_pubReady, d2.errback,
callbackArgs=(d2,))
return d2
def _pubReady(result, d2):
'(internal)'
result.callWhenActivated(d2.callback)
| apache-2.0 |
karandpr/Doppler3ICS | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
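# each trace line ends with "...] <timestamp>: <callee> <-<caller>"; extract those three fields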
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
Intel-Corporation/tensorflow | tensorflow/contrib/learn/python/learn/estimators/composable_model_test.py | 56 | 6358 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ComposableModel classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import composable_model
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
def _iris_input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150, 1], dtype=dtypes.int32)
def _base_model_fn(features, labels, mode, params):
model = params['model']
feature_columns = params['feature_columns']
head = params['head']
if mode == model_fn_lib.ModeKeys.TRAIN:
logits = model.build_model(features, feature_columns, is_training=True)
elif mode == model_fn_lib.ModeKeys.EVAL:
logits = model.build_model(features, feature_columns, is_training=False)
else:
raise NotImplementedError
def _train_op_fn(loss):
global_step = training_util.get_global_step()
assert global_step
train_step = model.get_train_step(loss)
with ops.control_dependencies(train_step):
with ops.get_default_graph().colocate_with(global_step):
return state_ops.assign_add(global_step, 1).op
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def _linear_estimator(head, feature_columns):
return estimator.Estimator(
model_fn=_base_model_fn,
params={
'model':
composable_model.LinearComposableModel(
num_label_columns=head.logits_dimension),
'feature_columns':
feature_columns,
'head':
head
})
def _joint_linear_estimator(head, feature_columns):
return estimator.Estimator(
model_fn=_base_model_fn,
params={
'model':
composable_model.LinearComposableModel(
num_label_columns=head.logits_dimension, _joint_weights=True),
'feature_columns':
feature_columns,
'head':
head
})
def _dnn_estimator(head, feature_columns, hidden_units):
return estimator.Estimator(
model_fn=_base_model_fn,
params={
'model':
composable_model.DNNComposableModel(
num_label_columns=head.logits_dimension,
hidden_units=hidden_units),
'feature_columns':
feature_columns,
'head':
head
})
class ComposableModelTest(test.TestCase):
def testLinearModel(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
head = head_lib.multi_class_head(n_classes=2)
classifier = _linear_estimator(head, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=1000)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=2000)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointLinearModel(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.sparse_column_with_hash_bucket('age', 2)
head = head_lib.multi_class_head(n_classes=2)
classifier = _joint_linear_estimator(head, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=1000)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=2000)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testDNNModel(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
head = head_lib.multi_class_head(n_classes=3)
classifier = _dnn_estimator(
head, feature_columns=cont_features, hidden_units=[3, 3])
classifier.fit(input_fn=_iris_input_fn, steps=1000)
classifier.evaluate(input_fn=_iris_input_fn, steps=100)
if __name__ == '__main__':
test.main()
| apache-2.0 |
brandond/ansible | test/units/modules/network/nxos/test_nxos_pim.py | 44 | 2195 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_pim
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosPimModule(TestNxosModule):
module = nxos_pim
def setUp(self):
super(TestNxosPimModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_pim.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_pim.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestNxosPimModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_pim', 'config.cfg')
self.load_config.return_value = None
def test_nxos_pim(self):
set_module_args(dict(ssm_range='232.0.0.0/8'))
self.execute_module(changed=True, commands=['ip pim ssm range 232.0.0.0/8'])
def test_nxos_pim_none(self):
set_module_args(dict(ssm_range='none'))
self.execute_module(changed=True, commands=['ip pim ssm range none'])
def test_nxos_pim_no_change(self):
set_module_args(dict(ssm_range='127.0.0.0/31'))
self.execute_module(changed=False, commands=[])
| gpl-3.0 |
Gitlab11/odoo | addons/bus/bus.py | 325 | 7324 | # -*- coding: utf-8 -*-
import datetime
import json
import logging
import select
import threading
import time
import random
import simplejson
import openerp
from openerp.osv import osv, fields
from openerp.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
TIMEOUT = 50
#----------------------------------------------------------
# Bus
#----------------------------------------------------------
def json_dump(v):
return simplejson.dumps(v, separators=(',', ':'))
def hashable(key):
if isinstance(key, list):
key = tuple(key)
return key
class ImBus(osv.Model):
_name = 'bus.bus'
_columns = {
'id' : fields.integer('Id'),
'create_date' : fields.datetime('Create date'),
'channel' : fields.char('Channel'),
'message' : fields.char('Message'),
}
def gc(self, cr, uid):
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT*2)
domain = [('create_date', '<', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
ids = self.search(cr, openerp.SUPERUSER_ID, domain)
self.unlink(cr, openerp.SUPERUSER_ID, ids)
def sendmany(self, cr, uid, notifications):
channels = set()
for channel, message in notifications:
channels.add(channel)
values = {
"channel" : json_dump(channel),
"message" : json_dump(message)
}
self.pool['bus.bus'].create(cr, openerp.SUPERUSER_ID, values)
cr.commit()
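# opportunistically garbage-collect expired notifications on roughly 1% of sends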
if random.random() < 0.01:
self.gc(cr, uid)
if channels:
with openerp.sql_db.db_connect('postgres').cursor() as cr2:
cr2.execute("notify imbus, %s", (json_dump(list(channels)),))
def sendone(self, cr, uid, channel, message):
self.sendmany(cr, uid, [[channel, message]])
def poll(self, cr, uid, channels, last=0):
# the first poll returns the notifications already in the 'buffer'
if last == 0:
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT)
domain = [('create_date', '>', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
else:
# else returns the unread notifications
domain = [('id','>',last)]
channels = [json_dump(c) for c in channels]
domain.append(('channel','in',channels))
notifications = self.search_read(cr, openerp.SUPERUSER_ID, domain)
return [{"id":notif["id"], "channel": simplejson.loads(notif["channel"]), "message":simplejson.loads(notif["message"])} for notif in notifications]
class ImDispatch(object):
def __init__(self):
self.channels = {}
def poll(self, dbname, channels, last, timeout=TIMEOUT):
# Don't hang ctrl-c for a poll request; we need to bypass private
# attribute access because we don't know before starting the thread that
# it will handle a longpolling request
if not openerp.evented:
current = threading.current_thread()
current._Thread__daemonic = True
# rename the thread to avoid tests waiting for a longpolling
current.setName("openerp.longpolling.request.%s" % current.ident)
registry = openerp.registry(dbname)
# immediately return if past notifications exist
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
# or wait for future ones
if not notifications:
event = self.Event()
for c in channels:
self.channels.setdefault(hashable(c), []).append(event)
try:
event.wait(timeout=timeout)
with registry.cursor() as cr:
notifications = registry['bus.bus'].poll(cr, openerp.SUPERUSER_ID, channels, last)
except Exception:
# timeout
pass
return notifications
def loop(self):
""" Dispatch postgres notifications to the relevant polling threads/greenlets """
_logger.info("Bus.loop listen imbus on db postgres")
with openerp.sql_db.db_connect('postgres').cursor() as cr:
conn = cr._cnx
cr.execute("listen imbus")
cr.commit();
while True:
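# block for up to TIMEOUT seconds waiting for a postgres NOTIFY on the imbus channel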
if select.select([conn], [], [], TIMEOUT) == ([],[],[]):
pass
else:
conn.poll()
channels = []
while conn.notifies:
channels.extend(json.loads(conn.notifies.pop().payload))
# dispatch to local threads/greenlets
events = set()
for c in channels:
events.update(self.channels.pop(hashable(c),[]))
for e in events:
e.set()
def run(self):
while True:
try:
self.loop()
except Exception, e:
_logger.exception("Bus.loop error, sleep and retry")
time.sleep(TIMEOUT)
def start(self):
if openerp.evented:
# gevent mode
import gevent
self.Event = gevent.event.Event
gevent.spawn(self.run)
elif openerp.multi_process:
# disabled in prefork mode
return
else:
# threaded mode
self.Event = threading.Event
t = threading.Thread(name="%s.Bus" % __name__, target=self.run)
t.daemon = True
t.start()
return self
dispatch = ImDispatch().start()
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class Controller(openerp.http.Controller):
""" Examples:
openerp.jsonRpc('/longpolling/poll','call',{"channels":["c1"],last:0}).then(function(r){console.log(r)});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c1","message":"m1"});
openerp.jsonRpc('/longpolling/send','call',{"channel":"c2","message":"m2"});
"""
@openerp.http.route('/longpolling/send', type="json", auth="public")
def send(self, channel, message):
if not isinstance(channel, basestring):
raise Exception("bus.Bus only string channels are allowed.")
registry, cr, uid, context = request.registry, request.cr, request.session.uid, request.context
return registry['bus.bus'].sendone(cr, uid, channel, message)
# override to add channels
def _poll(self, dbname, channels, last, options):
request.cr.close()
request._cr = None
return dispatch.poll(dbname, channels, last)
@openerp.http.route('/longpolling/poll', type="json", auth="public")
def poll(self, channels, last, options=None):
if options is None:
options = {}
if not dispatch:
raise Exception("bus.Bus unavailable")
if [c for c in channels if not isinstance(c, basestring)]:
print channels
raise Exception("bus.Bus only string channels are allowed.")
return self._poll(request.db, channels, last, options)
# vim:et:
| agpl-3.0 |
freakboy3742/django | tests/db_functions/comparison/test_coalesce.py | 71 | 2643 | from django.db.models import TextField
from django.db.models.functions import Coalesce, Lower
from django.test import TestCase
from django.utils import timezone
from ..models import Article, Author
lorem_ipsum = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua."""
class CoalesceTests(TestCase):
def test_basic(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.annotate(display_name=Coalesce('alias', 'name'))
self.assertQuerysetEqual(
authors.order_by('name'), ['smithj', 'Rhonda'],
lambda a: a.display_name
)
def test_gt_two_expressions(self):
with self.assertRaisesMessage(ValueError, 'Coalesce must take at least two expressions'):
Author.objects.annotate(display_name=Coalesce('alias'))
def test_mixed_values(self):
a1 = Author.objects.create(name='John Smith', alias='smithj')
a2 = Author.objects.create(name='Rhonda')
ar1 = Article.objects.create(
title='How to Django',
text=lorem_ipsum,
written=timezone.now(),
)
ar1.authors.add(a1)
ar1.authors.add(a2)
# mixed Text and Char
article = Article.objects.annotate(
headline=Coalesce('summary', 'text', output_field=TextField()),
)
self.assertQuerysetEqual(
article.order_by('title'), [lorem_ipsum],
lambda a: a.headline
)
# mixed Text and Char wrapped
article = Article.objects.annotate(
headline=Coalesce(Lower('summary'), Lower('text'), output_field=TextField()),
)
self.assertQuerysetEqual(
article.order_by('title'), [lorem_ipsum.lower()],
lambda a: a.headline
)
def test_ordering(self):
Author.objects.create(name='John Smith', alias='smithj')
Author.objects.create(name='Rhonda')
authors = Author.objects.order_by(Coalesce('alias', 'name'))
self.assertQuerysetEqual(
authors, ['Rhonda', 'John Smith'],
lambda a: a.name
)
authors = Author.objects.order_by(Coalesce('alias', 'name').asc())
self.assertQuerysetEqual(
authors, ['Rhonda', 'John Smith'],
lambda a: a.name
)
authors = Author.objects.order_by(Coalesce('alias', 'name').desc())
self.assertQuerysetEqual(
authors, ['John Smith', 'Rhonda'],
lambda a: a.name
)
| bsd-3-clause |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/files/etc/apps/headphones/lib/unidecode/x0d0.py | 253 | 4706 | data = (
'kweon', # 0x00
'kweonj', # 0x01
'kweonh', # 0x02
'kweod', # 0x03
'kweol', # 0x04
'kweolg', # 0x05
'kweolm', # 0x06
'kweolb', # 0x07
'kweols', # 0x08
'kweolt', # 0x09
'kweolp', # 0x0a
'kweolh', # 0x0b
'kweom', # 0x0c
'kweob', # 0x0d
'kweobs', # 0x0e
'kweos', # 0x0f
'kweoss', # 0x10
'kweong', # 0x11
'kweoj', # 0x12
'kweoc', # 0x13
'kweok', # 0x14
'kweot', # 0x15
'kweop', # 0x16
'kweoh', # 0x17
'kwe', # 0x18
'kweg', # 0x19
'kwegg', # 0x1a
'kwegs', # 0x1b
'kwen', # 0x1c
'kwenj', # 0x1d
'kwenh', # 0x1e
'kwed', # 0x1f
'kwel', # 0x20
'kwelg', # 0x21
'kwelm', # 0x22
'kwelb', # 0x23
'kwels', # 0x24
'kwelt', # 0x25
'kwelp', # 0x26
'kwelh', # 0x27
'kwem', # 0x28
'kweb', # 0x29
'kwebs', # 0x2a
'kwes', # 0x2b
'kwess', # 0x2c
'kweng', # 0x2d
'kwej', # 0x2e
'kwec', # 0x2f
'kwek', # 0x30
'kwet', # 0x31
'kwep', # 0x32
'kweh', # 0x33
'kwi', # 0x34
'kwig', # 0x35
'kwigg', # 0x36
'kwigs', # 0x37
'kwin', # 0x38
'kwinj', # 0x39
'kwinh', # 0x3a
'kwid', # 0x3b
'kwil', # 0x3c
'kwilg', # 0x3d
'kwilm', # 0x3e
'kwilb', # 0x3f
'kwils', # 0x40
'kwilt', # 0x41
'kwilp', # 0x42
'kwilh', # 0x43
'kwim', # 0x44
'kwib', # 0x45
'kwibs', # 0x46
'kwis', # 0x47
'kwiss', # 0x48
'kwing', # 0x49
'kwij', # 0x4a
'kwic', # 0x4b
'kwik', # 0x4c
'kwit', # 0x4d
'kwip', # 0x4e
'kwih', # 0x4f
'kyu', # 0x50
'kyug', # 0x51
'kyugg', # 0x52
'kyugs', # 0x53
'kyun', # 0x54
'kyunj', # 0x55
'kyunh', # 0x56
'kyud', # 0x57
'kyul', # 0x58
'kyulg', # 0x59
'kyulm', # 0x5a
'kyulb', # 0x5b
'kyuls', # 0x5c
'kyult', # 0x5d
'kyulp', # 0x5e
'kyulh', # 0x5f
'kyum', # 0x60
'kyub', # 0x61
'kyubs', # 0x62
'kyus', # 0x63
'kyuss', # 0x64
'kyung', # 0x65
'kyuj', # 0x66
'kyuc', # 0x67
'kyuk', # 0x68
'kyut', # 0x69
'kyup', # 0x6a
'kyuh', # 0x6b
'keu', # 0x6c
'keug', # 0x6d
'keugg', # 0x6e
'keugs', # 0x6f
'keun', # 0x70
'keunj', # 0x71
'keunh', # 0x72
'keud', # 0x73
'keul', # 0x74
'keulg', # 0x75
'keulm', # 0x76
'keulb', # 0x77
'keuls', # 0x78
'keult', # 0x79
'keulp', # 0x7a
'keulh', # 0x7b
'keum', # 0x7c
'keub', # 0x7d
'keubs', # 0x7e
'keus', # 0x7f
'keuss', # 0x80
'keung', # 0x81
'keuj', # 0x82
'keuc', # 0x83
'keuk', # 0x84
'keut', # 0x85
'keup', # 0x86
'keuh', # 0x87
'kyi', # 0x88
'kyig', # 0x89
'kyigg', # 0x8a
'kyigs', # 0x8b
'kyin', # 0x8c
'kyinj', # 0x8d
'kyinh', # 0x8e
'kyid', # 0x8f
'kyil', # 0x90
'kyilg', # 0x91
'kyilm', # 0x92
'kyilb', # 0x93
'kyils', # 0x94
'kyilt', # 0x95
'kyilp', # 0x96
'kyilh', # 0x97
'kyim', # 0x98
'kyib', # 0x99
'kyibs', # 0x9a
'kyis', # 0x9b
'kyiss', # 0x9c
'kying', # 0x9d
'kyij', # 0x9e
'kyic', # 0x9f
'kyik', # 0xa0
'kyit', # 0xa1
'kyip', # 0xa2
'kyih', # 0xa3
'ki', # 0xa4
'kig', # 0xa5
'kigg', # 0xa6
'kigs', # 0xa7
'kin', # 0xa8
'kinj', # 0xa9
'kinh', # 0xaa
'kid', # 0xab
'kil', # 0xac
'kilg', # 0xad
'kilm', # 0xae
'kilb', # 0xaf
'kils', # 0xb0
'kilt', # 0xb1
'kilp', # 0xb2
'kilh', # 0xb3
'kim', # 0xb4
'kib', # 0xb5
'kibs', # 0xb6
'kis', # 0xb7
'kiss', # 0xb8
'king', # 0xb9
'kij', # 0xba
'kic', # 0xbb
'kik', # 0xbc
'kit', # 0xbd
'kip', # 0xbe
'kih', # 0xbf
'ta', # 0xc0
'tag', # 0xc1
'tagg', # 0xc2
'tags', # 0xc3
'tan', # 0xc4
'tanj', # 0xc5
'tanh', # 0xc6
'tad', # 0xc7
'tal', # 0xc8
'talg', # 0xc9
'talm', # 0xca
'talb', # 0xcb
'tals', # 0xcc
'talt', # 0xcd
'talp', # 0xce
'talh', # 0xcf
'tam', # 0xd0
'tab', # 0xd1
'tabs', # 0xd2
'tas', # 0xd3
'tass', # 0xd4
'tang', # 0xd5
'taj', # 0xd6
'tac', # 0xd7
'tak', # 0xd8
'tat', # 0xd9
'tap', # 0xda
'tah', # 0xdb
'tae', # 0xdc
'taeg', # 0xdd
'taegg', # 0xde
'taegs', # 0xdf
'taen', # 0xe0
'taenj', # 0xe1
'taenh', # 0xe2
'taed', # 0xe3
'tael', # 0xe4
'taelg', # 0xe5
'taelm', # 0xe6
'taelb', # 0xe7
'taels', # 0xe8
'taelt', # 0xe9
'taelp', # 0xea
'taelh', # 0xeb
'taem', # 0xec
'taeb', # 0xed
'taebs', # 0xee
'taes', # 0xef
'taess', # 0xf0
'taeng', # 0xf1
'taej', # 0xf2
'taec', # 0xf3
'taek', # 0xf4
'taet', # 0xf5
'taep', # 0xf6
'taeh', # 0xf7
'tya', # 0xf8
'tyag', # 0xf9
'tyagg', # 0xfa
'tyags', # 0xfb
'tyan', # 0xfc
'tyanj', # 0xfd
'tyanh', # 0xfe
'tyad', # 0xff
)
| gpl-2.0 |
jmread/molearn | runDemo.py | 1 | 1406 | #!/usr/bin/python
import sys
from time import *
from numpy import *
from molearn.core import metrics
from sklearn import linear_model
from molearn.classifiers.BR import BR
from molearn.classifiers.CC import CC
set_printoptions(precision=3, suppress=True)
basename = "molearn/data/"
dname="Music.csv"
L = 6
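# the first L columns of the CSV are the label columns; the remaining columns are features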
filename=basename+dname
XY = genfromtxt(filename, skip_header=1, delimiter=",")
N,DL = XY.shape
X = XY[:,L:DL]
Y = XY[:,0:L]
N_train = int(N/2)
X_train = X[0:N_train,:]
Y_train = Y[0:N_train,:]
X_test = X[N_train:N,:]
Y_test = Y[N_train:N,:]
h = linear_model.LogisticRegression()
br = BR(L,h)
cc = CC(L,h)
t0 = clock()
br.fit(X_train,Y_train)
print("BR",)
print(metrics.Exact_match(Y_test,br.predict(X_test)),)
print(metrics.Hamming_loss(Y_test,br.predict(X_test)),)
print(clock() - t0)
t0 = clock()
cc.fit(X_train,Y_train)
print("CC",)
print(metrics.Exact_match(Y_test,cc.predict(X_test)),)
print(metrics.Hamming_loss(Y_test,cc.predict(X_test)),)
print(clock() - t0)
from sklearn.tree import DecisionTreeClassifier
from molearn.classifiers.Ensemble import Ensemble
from molearn.classifiers.CC import RCC
ecc = Ensemble(base_estimator=RCC(DecisionTreeClassifier()),n_estimators=10)
t0 = clock()
ecc.fit(X_train,Y_train)
print("ECC",)
print(metrics.Exact_match(Y_test,ecc.predict(X_test)),)
print(metrics.Hamming_loss(Y_test,ecc.predict(X_test)),)
print(clock() - t0)
| gpl-2.0 |
alxgu/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py | 27 | 11396 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_net_facts
short_description: Gather facts about ec2 VPCs in AWS
description:
- Gather facts about ec2 VPCs in AWS
version_added: "2.1"
author: "Rob White (@wimnat)"
requirements:
- boto3
- botocore
options:
vpc_ids:
description:
- A list of VPC IDs that exist in your account.
version_added: "2.5"
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all VPCs
- ec2_vpc_net_facts:
# Gather facts about a particular VPC using VPC ID
- ec2_vpc_net_facts:
vpc_ids: vpc-00112233
# Gather facts about any VPC with a tag key Name and value Example
- ec2_vpc_net_facts:
filters:
"tag:Name": Example
'''
RETURN = '''
vpcs:
description: Returns an array of complex objects as described below.
returned: success
type: complex
contains:
id:
description: The ID of the VPC (for backwards compatibility).
returned: always
type: str
vpc_id:
description: The ID of the VPC .
returned: always
type: str
state:
description: The state of the VPC.
returned: always
type: str
tags:
description: A dict of tags associated with the VPC.
returned: always
type: dict
instance_tenancy:
description: The instance tenancy setting for the VPC.
returned: always
type: str
is_default:
description: True if this is the default VPC for account.
returned: always
type: bool
cidr_block:
description: The IPv4 CIDR block assigned to the VPC.
returned: always
type: str
classic_link_dns_supported:
description: True/False depending on attribute setting for classic link DNS support.
returned: always
type: bool
classic_link_enabled:
description: True/False depending on if classic link support is enabled.
returned: always
type: bool
enable_dns_hostnames:
description: True/False depending on attribute setting for DNS hostnames support.
returned: always
type: bool
enable_dns_support:
description: True/False depending on attribute setting for DNS support.
returned: always
type: bool
ipv6_cidr_block_association_set:
description: An array of IPv6 cidr block association set information.
returned: always
type: complex
contains:
association_id:
description: The association ID
returned: always
type: str
ipv6_cidr_block:
description: The IPv6 CIDR block that is associated with the VPC.
returned: always
type: str
ipv6_cidr_block_state:
description: A hash/dict that contains a single item. The state of the cidr block association.
returned: always
type: dict
contains:
state:
description: The CIDR block association state.
returned: always
type: str
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
boto3_conn,
ec2_argument_spec,
get_aws_connection_info,
AWSRetry,
HAS_BOTO3,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
ansible_dict_to_boto3_filter_list
)
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
@AWSRetry.exponential_backoff()
def describe_vpc_attr_with_backoff(connection, vpc_id, vpc_attribute):
"""
Describe VPC Attributes with AWSRetry backoff throttling support.
connection : boto3 client connection object
vpc_id : The VPC ID to pull attribute value from
vpc_attribute : The VPC attribute to get the value from - valid options = enableDnsSupport or enableDnsHostnames
"""
return connection.describe_vpc_attribute(VpcId=vpc_id, Attribute=vpc_attribute)
def describe_vpcs(connection, module):
"""
Describe VPCs.
connection : boto3 client connection object
module : AnsibleModule object
"""
# collect parameters
filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
vpc_ids = module.params.get('vpc_ids')
# init empty list for return vars
vpc_info = list()
vpc_list = list()
# Get the basic VPC info
try:
response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to describe VPCs {0}: {1}".format(vpc_ids, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe VPCs {0}: {1}".format(vpc_ids, to_native(e)),
exception=traceback.format_exc())
# Loop through results and create a list of VPC IDs
for vpc in response['Vpcs']:
vpc_list.append(vpc['VpcId'])
# We can get these results in bulk but still needs two separate calls to the API
try:
cl_enabled = connection.describe_vpc_classic_link(VpcIds=vpc_list)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
cl_enabled = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkEnabled': False} for vpc_id in vpc_list]}
else:
module.fail_json(msg="Unable to describe if ClassicLink is enabled: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe if ClassicLink is enabled: {0}".format(to_native(e)),
exception=traceback.format_exc())
try:
cl_dns_support = connection.describe_vpc_classic_link_dns_support(VpcIds=vpc_list)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
cl_dns_support = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkDnsSupported': False} for vpc_id in vpc_list]}
else:
module.fail_json(msg="Unable to describe if ClassicLinkDns is supported: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe if ClassicLinkDns is supported: {0}".format(to_native(e)),
exception=traceback.format_exc())
# Loop through the results and add the other VPC attributes we gathered
for vpc in response['Vpcs']:
error_message = "Unable to describe VPC attribute {0}: {1}"
# We have to make two separate calls per VPC to get these attributes.
try:
dns_support = describe_vpc_attr_with_backoff(connection, vpc['VpcId'], 'enableDnsSupport')
except botocore.exceptions.ClientError as e:
module.fail_json(msg=error_message.format('enableDnsSupport', to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg=error_message.format('enableDnsSupport', to_native(e)),
exception=traceback.format_exc())
try:
dns_hostnames = describe_vpc_attr_with_backoff(connection, vpc['VpcId'], 'enableDnsHostnames')
except botocore.exceptions.ClientError as e:
module.fail_json(msg=error_message.format('enableDnsHostnames', to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg=error_message.format('enableDnsHostnames', to_native(e)),
exception=traceback.format_exc())
# loop through the ClassicLink Enabled results and add the value for the correct VPC
for item in cl_enabled['Vpcs']:
if vpc['VpcId'] == item['VpcId']:
vpc['ClassicLinkEnabled'] = item['ClassicLinkEnabled']
# loop through the ClassicLink DNS support results and add the value for the correct VPC
for item in cl_dns_support['Vpcs']:
if vpc['VpcId'] == item['VpcId']:
vpc['ClassicLinkDnsSupported'] = item['ClassicLinkDnsSupported']
# add the two DNS attributes
vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value')
vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value')
# for backwards compatibility
vpc['id'] = vpc['VpcId']
vpc_info.append(camel_dict_to_snake_dict(vpc))
# convert tag list to ansible dict
vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', []))
module.exit_json(vpcs=vpc_info)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
vpc_ids=dict(type='list', default=[]),
filters=dict(type='dict', default={})
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
describe_vpcs(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
voodka/ghostbakup | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/murphy.py | 364 | 2751 | # -*- coding: utf-8 -*-
"""
pygments.styles.murphy
~~~~~~~~~~~~~~~~~~~~~~
Murphy's style from CodeRay.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class MurphyStyle(Style):
"""
Murphy's style from CodeRay.
"""
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "#666 italic",
Comment.Preproc: "#579 noitalic",
Comment.Special: "#c00 bold",
Keyword: "bold #289",
Keyword.Pseudo: "#08f",
Keyword.Type: "#66f",
Operator: "#333",
Operator.Word: "bold #000",
Name.Builtin: "#072",
Name.Function: "bold #5ed",
Name.Class: "bold #e9e",
Name.Namespace: "bold #0e84b5",
Name.Exception: "bold #F00",
Name.Variable: "#036",
Name.Variable.Instance: "#aaf",
Name.Variable.Class: "#ccf",
Name.Variable.Global: "#f84",
Name.Constant: "bold #5ed",
Name.Label: "bold #970",
Name.Entity: "#800",
Name.Attribute: "#007",
Name.Tag: "#070",
Name.Decorator: "bold #555",
String: "bg:#e0e0ff",
String.Char: "#88F bg:",
String.Doc: "#D42 bg:",
String.Interpol: "bg:#eee",
String.Escape: "bold #666",
String.Regex: "bg:#e0e0ff #000",
String.Symbol: "#fc8 bg:",
String.Other: "#f88",
Number: "bold #60E",
Number.Integer: "bold #66f",
Number.Float: "bold #60E",
Number.Hex: "bold #058",
Number.Oct: "bold #40E",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #c65d09",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "#F00 bg:#FAA"
}
| mit |
pplatek/odoo | openerp/addons/test_new_api/models.py | 74 | 7032 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class res_partner(osv.Model):
_inherit = 'res.partner'
#
# add related fields to test them
#
_columns = {
# a regular one
'related_company_partner_id': fields.related(
'company_id', 'partner_id', type='many2one', obj='res.partner'),
# a related field with a single field
'single_related_company_id': fields.related(
'company_id', type='many2one', obj='res.company'),
# a related field with a single field that is also a related field!
'related_related_company_id': fields.related(
'single_related_company_id', type='many2one', obj='res.company'),
}
from openerp import models, fields, api, _
class Category(models.Model):
_name = 'test_new_api.category'
name = fields.Char(required=True)
parent = fields.Many2one('test_new_api.category')
display_name = fields.Char(compute='_compute_display_name', inverse='_inverse_display_name')
@api.one
@api.depends('name', 'parent.display_name') # this definition is recursive
def _compute_display_name(self):
if self.parent:
self.display_name = self.parent.display_name + ' / ' + self.name
else:
self.display_name = self.name
@api.one
def _inverse_display_name(self):
names = self.display_name.split('/')
# determine sequence of categories
categories = []
for name in names[:-1]:
category = self.search([('name', 'ilike', name.strip())])
categories.append(category[0])
categories.append(self)
# assign parents following sequence
for parent, child in zip(categories, categories[1:]):
if parent and child:
child.parent = parent
# assign name of last category, and reassign display_name (to normalize it)
self.name = names[-1].strip()
class Discussion(models.Model):
_name = 'test_new_api.discussion'
name = fields.Char(string='Title', required=True,
help="General description of what this discussion is about.")
moderator = fields.Many2one('res.users')
categories = fields.Many2many('test_new_api.category',
'test_new_api_discussion_category', 'discussion', 'category')
participants = fields.Many2many('res.users')
messages = fields.One2many('test_new_api.message', 'discussion')
message_changes = fields.Integer(string='Message changes')
@api.onchange('moderator')
def _onchange_moderator(self):
self.participants |= self.moderator
@api.onchange('messages')
def _onchange_messages(self):
self.message_changes = len(self.messages)
class Message(models.Model):
_name = 'test_new_api.message'
discussion = fields.Many2one('test_new_api.discussion', ondelete='cascade')
body = fields.Text()
author = fields.Many2one('res.users', default=lambda self: self.env.user)
name = fields.Char(string='Title', compute='_compute_name', store=True)
display_name = fields.Char(string='Abstract', compute='_compute_display_name')
size = fields.Integer(compute='_compute_size', search='_search_size')
double_size = fields.Integer(compute='_compute_double_size')
discussion_name = fields.Char(related='discussion.name')
@api.one
@api.constrains('author', 'discussion')
def _check_author(self):
if self.discussion and self.author not in self.discussion.participants:
raise ValueError(_("Author must be among the discussion participants."))
@api.one
@api.depends('author.name', 'discussion.name')
def _compute_name(self):
self.name = "[%s] %s" % (self.discussion.name or '', self.author.name or '')
@api.one
@api.depends('author.name', 'discussion.name', 'body')
def _compute_display_name(self):
stuff = "[%s] %s: %s" % (self.author.name, self.discussion.name or '', self.body or '')
self.display_name = stuff[:80]
@api.one
@api.depends('body')
def _compute_size(self):
self.size = len(self.body or '')
def _search_size(self, operator, value):
if operator not in ('=', '!=', '<', '<=', '>', '>=', 'in', 'not in'):
return []
# retrieve all the messages that match with a specific SQL query
query = """SELECT id FROM "%s" WHERE char_length("body") %s %%s""" % \
(self._table, operator)
self.env.cr.execute(query, (value,))
ids = [t[0] for t in self.env.cr.fetchall()]
return [('id', 'in', ids)]
@api.one
@api.depends('size')
def _compute_double_size(self):
# This illustrates a subtle situation: self.double_size depends on
# self.size. When size is computed, self.size is assigned, which should
# normally invalidate self.double_size. However, this may not happen
# while self.double_size is being computed: the last statement below
# would fail, because self.double_size would be undefined.
self.double_size = 0
size = self.size
self.double_size = self.double_size + size
class MixedModel(models.Model):
_name = 'test_new_api.mixed'
number = fields.Float(digits=(10, 2), default=3.14)
date = fields.Date()
now = fields.Datetime(compute='_compute_now')
lang = fields.Selection(string='Language', selection='_get_lang')
reference = fields.Reference(string='Related Document',
selection='_reference_models')
@api.one
def _compute_now(self):
# this is a non-stored computed field without dependencies
self.now = fields.Datetime.now()
@api.model
def _get_lang(self):
langs = self.env['res.lang'].search([])
return [(lang.code, lang.name) for lang in langs]
@api.model
def _reference_models(self):
models = self.env['ir.model'].search([('state', '!=', 'manual')])
return [(model.model, model.name)
for model in models
if not model.model.startswith('ir.')]
| agpl-3.0 |
chongtianfeiyu/kbengine | kbe/res/scripts/common/Lib/concurrent/futures/process.py | 93 | 17574 | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ProcessPoolExecutor.
The follow diagram and text describe the data-flow through the system:
|======================= In-process =====================|== Out-of-process ==|
+----------+ +----------+ +--------+ +-----------+ +---------+
| | => | Work Ids | => | | => | Call Q | => | |
| | +----------+ | | +-----------+ | |
| | | ... | | | | ... | | |
| | | 6 | | | | 5, call() | | |
| | | 7 | | | | ... | | |
| Process | | ... | | Local | +-----------+ | Process |
| Pool | +----------+ | Worker | | #1..n |
| Executor | | Thread | | |
| | +----------- + | | +-----------+ | |
| | <=> | Work Items | <=> | | <= | Result Q | <= | |
| | +------------+ | | +-----------+ | |
| | | 6: call() | | | | ... | | |
| | | future | | | | 4, result | | |
| | | ... | | | | 3, except | | |
+----------+ +------------+ +--------+ +-----------+ +---------+
Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue
Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
WorkItem from the "Work Items" dict: if the work item has been cancelled then
it is simply removed from the dict, otherwise it is repackaged as a
_CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
"Work Items" dict and deletes the dict entry
Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
_ResultItems in "Result Q"
"""
__author__ = 'Brian Quinlan ([email protected])'
import atexit
import os
from concurrent.futures import _base
import queue
from queue import Full
import multiprocessing
from multiprocessing import SimpleQueue
from multiprocessing.connection import wait
import threading
import weakref
# Workers are created as daemon threads and processes. This is done to allow the
# interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
class _ResultItem(object):
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
class _CallItem(object):
def __init__(self, work_id, fn, args, kwargs):
self.work_id = work_id
self.fn = fn
self.args = args
self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
"""Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A multiprocessing.Queue of _ResultItems that will written
to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty.
"""
while True:
call_item = call_queue.get(block=True)
if call_item is None:
# Wake up queue management thread
result_queue.put(os.getpid())
return
try:
r = call_item.fn(*call_item.args, **call_item.kwargs)
except BaseException as e:
result_queue.put(_ResultItem(call_item.work_id,
exception=e))
else:
result_queue.put(_ResultItem(call_item.work_id,
result=r))
def _add_call_item_to_queue(pending_work_items,
work_ids,
call_queue):
"""Fills call_queue with _WorkItems from pending_work_items.
This function never blocks.
Args:
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
are consumed and the corresponding _WorkItems from
pending_work_items are transformed into _CallItems and put in
call_queue.
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems.
"""
while True:
if call_queue.full():
return
try:
work_id = work_ids.get(block=False)
except queue.Empty:
return
else:
work_item = pending_work_items[work_id]
if work_item.future.set_running_or_notify_cancel():
call_queue.put(_CallItem(work_id,
work_item.fn,
work_item.args,
work_item.kwargs),
block=True)
else:
del pending_work_items[work_id]
continue
def _queue_management_worker(executor_reference,
processes,
pending_work_items,
work_ids_queue,
call_queue,
result_queue):
"""Manages the communication between this process and the worker processes.
This function is run in a local thread.
Args:
executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
this thread. Used to determine if the ProcessPoolExecutor has been
garbage collected and that this function can exit.
process: A list of the multiprocessing.Process instances used as
workers.
pending_work_items: A dict mapping work ids to _WorkItems e.g.
{5: <_WorkItem...>, 6: <_WorkItem...>, ...}
work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
call_queue: A multiprocessing.Queue that will be filled with _CallItems
derived from _WorkItems for processing by the process workers.
result_queue: A multiprocessing.Queue of _ResultItems generated by the
process workers.
"""
executor = None
def shutting_down():
return _shutdown or executor is None or executor._shutdown_thread
def shutdown_worker():
# This is an upper bound
nb_children_alive = sum(p.is_alive() for p in processes.values())
for i in range(0, nb_children_alive):
call_queue.put_nowait(None)
# Release the queue's resources as soon as possible.
call_queue.close()
# If .join() is not called on the created processes then
# some multiprocessing.Queue methods may deadlock on Mac OS X.
for p in processes.values():
p.join()
reader = result_queue._reader
while True:
_add_call_item_to_queue(pending_work_items,
work_ids_queue,
call_queue)
sentinels = [p.sentinel for p in processes.values()]
assert sentinels
ready = wait([reader] + sentinels)
if reader in ready:
result_item = reader.recv()
else:
# Mark the process pool broken so that submits fail right now.
executor = executor_reference()
if executor is not None:
executor._broken = True
executor._shutdown_thread = True
executor = None
# All futures in flight must be marked failed
for work_id, work_item in pending_work_items.items():
work_item.future.set_exception(
BrokenProcessPool(
"A process in the process pool was "
"terminated abruptly while the future was "
"running or pending."
))
# Delete references to object. See issue16284
del work_item
pending_work_items.clear()
# Terminate remaining workers forcibly: the queues or their
# locks may be in a dirty state and block forever.
for p in processes.values():
p.terminate()
shutdown_worker()
return
if isinstance(result_item, int):
# Clean shutdown of a worker using its PID
# (avoids marking the executor broken)
assert shutting_down()
p = processes.pop(result_item)
p.join()
if not processes:
shutdown_worker()
return
elif result_item is not None:
work_item = pending_work_items.pop(result_item.work_id, None)
# work_item can be None if another process terminated (see above)
if work_item is not None:
if result_item.exception:
work_item.future.set_exception(result_item.exception)
else:
work_item.future.set_result(result_item.result)
# Delete references to object. See issue16284
del work_item
# Check whether we should start shutting down.
executor = executor_reference()
# No more work items can be added if:
# - The interpreter is shutting down OR
# - The executor that owns this worker has been collected OR
# - The executor that owns this worker has been shutdown.
if shutting_down():
try:
# Since no new work items can be added, it is safe to shutdown
# this thread if there are no pending work items.
if not pending_work_items:
shutdown_worker()
return
except Full:
# This is not a problem: we will eventually be woken up (in
# result_queue.get()) and be able to send a sentinel again.
pass
executor = None
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
global _system_limits_checked, _system_limited
if _system_limits_checked:
if _system_limited:
raise NotImplementedError(_system_limited)
_system_limits_checked = True
try:
nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems_max == -1:
# indetermined limit, assume that limit is determined
# by available memory only
return
if nsems_max >= 256:
# minimum number of semaphores available
# according to POSIX
return
_system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max
raise NotImplementedError(_system_limited)
class BrokenProcessPool(RuntimeError):
"""
Raised when a process in a ProcessPoolExecutor terminated abruptly
while a future was in the running state.
"""
class ProcessPoolExecutor(_base.Executor):
def __init__(self, max_workers=None):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = os.cpu_count() or 1
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
self._call_queue = multiprocessing.Queue(self._max_workers +
EXTRA_QUEUED_CALLS)
# Killed worker processes can produce spurious "broken pipe"
# tracebacks in the queue's own worker thread. But we detect killed
# processes anyway, so silence the tracebacks.
self._call_queue._ignore_epipe = True
self._result_queue = SimpleQueue()
self._work_ids = queue.Queue()
self._queue_management_thread = None
# Map of pids to processes
self._processes = {}
# Shutdown is a two-step process.
self._shutdown_thread = False
self._shutdown_lock = threading.Lock()
self._broken = False
self._queue_count = 0
self._pending_work_items = {}
def _start_queue_management_thread(self):
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(_, q=self._result_queue):
q.put(None)
if self._queue_management_thread is None:
# Start the processes so that their sentinels are known.
self._adjust_process_count()
self._queue_management_thread = threading.Thread(
target=_queue_management_worker,
args=(weakref.ref(self, weakref_cb),
self._processes,
self._pending_work_items,
self._work_ids,
self._call_queue,
self._result_queue))
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
_threads_queues[self._queue_management_thread] = self._result_queue
def _adjust_process_count(self):
for _ in range(len(self._processes), self._max_workers):
p = multiprocessing.Process(
target=_process_worker,
args=(self._call_queue,
self._result_queue))
p.start()
self._processes[p.pid] = p
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly; the process pool is no longer usable')
if self._shutdown_thread:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._result_queue.put(None)
self._start_queue_management_thread()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown_thread = True
if self._queue_management_thread:
# Wake up queue management thread
self._result_queue.put(None)
if wait:
self._queue_management_thread.join()
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._queue_management_thread = None
self._call_queue = None
self._result_queue = None
self._processes = None
shutdown.__doc__ = _base.Executor.shutdown.__doc__
atexit.register(_python_exit)
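# Illustrative usage sketch (editorial addition, not part of the original
# module). The helper names _square and _demo_process_pool are invented for the
# example; only submit(), result() and shutdown() from the class above are used.
def _square(x):
    # Must be defined at module level so it can be pickled and sent to workers.
    return x * x
def _demo_process_pool():
    executor = ProcessPoolExecutor(max_workers=2)
    futures = [executor.submit(_square, i) for i in range(4)]
    results = [f.result() for f in futures]   # [0, 1, 4, 9]
    executor.shutdown(wait=True)
    return results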
| lgpl-3.0 |
orb-framework/orb | tests/functional/mysql/test_my_statements.py | 2 | 5444 | def test_my_statement_add_column(User, my_sql):
st = my_sql.statement('ADD COLUMN')
assert st is not None
statement, data = st(User.schema().column('username'))
assert statement == 'ADD COLUMN `username` varchar(255) UNIQUE'
def test_my_statement_create_table(User, my_sql):
st = my_sql.statement('CREATE')
assert st is not None
statement, data = st(User)
assert 'CREATE TABLE IF NOT EXISTS `orb_testing`.`users`' in statement
def test_my_statement_create_table_in_namespace(User, my_sql):
import orb
with orb.Context(namespace='custom'):
st = my_sql.statement('CREATE')
assert st is not None
statement, data = st(User)
assert 'CREATE TABLE IF NOT EXISTS `custom`.`users`' in statement
def test_my_statement_insert_records(orb, User, my_sql):
st = my_sql.statement('INSERT')
assert st is not None
user_a = User(username='bob')
user_b = User(username='sally')
statement, data = st([user_a, user_b])
assert 'INSERT INTO `orb_testing`.`users`' in statement
def test_my_statement_insert_records_in_namespace(orb, User, my_sql):
user_a = User(username='bob')
user_b = User(username='sally')
with orb.Context(namespace='custom'):
st = my_sql.statement('INSERT')
assert st is not None
statement, data = st([user_a, user_b])
assert 'INSERT INTO `custom`.`users`' in statement
def test_my_statement_alter(orb, GroupUser, my_sql):
add = [orb.StringColumn(name='test_add')]
remove = [orb.StringColumn(name='test_remove')]
st = my_sql.statement('ALTER')
assert st is not None
statement, data = st(GroupUser, add, remove)
assert 'ALTER' in statement
add = [orb.StringColumn(name='test_add_i18n', flags={'I18n'})]
statement, data = st(GroupUser, add)
assert 'ALTER' in statement
def test_my_statement_alter_invalid(orb, my_sql):
    import pytest
    st = my_sql.statement('ALTER')
assert st is not None
with pytest.raises(orb.errors.OrbError):
statement, data = st(orb.View)
def test_my_statement_create_index(orb, GroupUser, my_sql):
index = orb.Index(name='byGroupAndUser', columns=[orb.ReferenceColumn(name='group'), orb.ReferenceColumn('user')])
index.setSchema(GroupUser.schema())
st = my_sql.statement('CREATE INDEX')
assert st is not None
statement, data = st(index)
assert 'CREATE INDEX' in statement
statement, data = st(index, checkFirst=True)
assert 'DO $$' in statement
def test_my_create_table(User, my_sql, my_db):
st = my_sql.statement('CREATE')
sql, data = st(User)
conn = my_db.connection()
conn.execute(sql)
assert True
def test_my_insert_bob(orb, User, my_sql, my_db):
st = my_sql.statement('INSERT')
user_a = User({
'username': 'bob',
'password': 'T3st1ng!'
})
sql, data = st([user_a])
conn = my_db.connection()
# if this has run before, then it will raise a duplicate entry
try:
conn.execute(sql, data)
except orb.errors.DuplicateEntryFound:
pass
assert True
def test_my_insert_sally(orb, User, my_sql, my_db):
st = my_sql.statement('INSERT')
user_a = User({
'username':'sally',
'password': 'T3st1ng!'
})
sql, data = st([user_a])
conn = my_db.connection()
# if this has run before, then it will raise a duplicate entry
try:
conn.execute(sql, data)
except orb.errors.DuplicateEntryFound:
pass
assert True
def test_my_select_all(orb, User, my_sql, my_db):
st = my_sql.statement('SELECT')
sql, data = st(User, orb.Context())
conn = my_db.connection()
records, count = conn.execute(sql, data)
assert len(records) == count
def test_my_select_one(orb, User, my_sql, my_db):
st = my_sql.statement('SELECT')
sql, data = st(User, orb.Context(limit=1))
conn = my_db.connection()
records, count = conn.execute(sql, data)
assert count == 1
def test_my_select_bob(orb, User, my_sql, my_db):
st = my_sql.statement('SELECT')
sql, data = st(User, orb.Context(where=orb.Query('username') == 'bob'))
conn = my_db.connection()
records, count = conn.execute(sql, data)
assert count == 1 and records[0]['username'] == 'bob'
def test_my_select_count(orb, User, my_sql, my_db):
select_st = my_sql.statement('SELECT')
select_sql, data = select_st(User, orb.Context())
conn = my_db.connection()
records, count = conn.execute(select_sql, data)
select_count_st = my_sql.statement('SELECT COUNT')
select_count_sql, data = select_count_st(User, orb.Context())
results, _ = conn.execute(select_count_sql, data)
assert results[0]['count'] == count
def test_my_select_bob_or_sally(orb, my_sql, my_db, User):
st = my_sql.statement('SELECT')
q = orb.Query('username') == 'bob'
q |= orb.Query('username') == 'sally'
sql, data = st(User, orb.Context(where=q))
conn = my_db.connection()
records, count = conn.execute(sql, data)
assert count == 2 and records[0]['username'] in ('bob', 'sally') and records[1]['username'] in ('bob', 'sally')
def test_my_select_bob_and_sally(orb, my_sql, my_db, User):
st = my_sql.statement('SELECT')
q = orb.Query('username') == 'bob'
q &= orb.Query('username') == 'sally'
sql, data = st(User, orb.Context(where=q))
conn = my_db.connection()
_, count = conn.execute(sql, data)
assert count == 0
| mit |
sebasbaumh/three.js | utils/converters/fbx/convert_to_threejs.py | 16 | 77099 | # @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
import operator
import re
import json
import types
import shutil
# #####################################################
# Globals
# #####################################################
option_triangulate = True
option_textures = True
option_copy_textures = True
option_prefix = True
option_geometry = False
option_forced_y_up = False
option_default_camera = False
option_default_light = False
option_pretty_print = False
converter = None
inputFolder = ""
outputFolder = ""
# #####################################################
# Pretty Printing Hacks
# #####################################################
# Force an array to be printed fully on a single line
class NoIndent(object):
def __init__(self, value, separator = ','):
self.separator = separator
self.value = value
def encode(self):
if not self.value:
return None
return '[ %s ]' % self.separator.join(str(f) for f in self.value)
# Force an array into chunks rather than printing each element on a new line
class ChunkedIndent(object):
def __init__(self, value, chunk_size = 15, force_rounding = False):
self.value = value
self.size = chunk_size
self.force_rounding = force_rounding
def encode(self):
        # Break the flat array into chunks of chunk_size values, join each chunk
        # into a ', '-delimited string (rounding the values when requested), and
        # prefix every chunk with '{CHUNK}' so the regex pass can find and
        # unquote these strings later.
if not self.value:
return None
if self.force_rounding:
return ['{CHUNK}%s' % ', '.join(str(round(f, 6)) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
else:
return ['{CHUNK}%s' % ', '.join(str(f) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
# This custom encoder looks for instances of NoIndent or ChunkedIndent.
# When it finds one, it emits the object's own encode() output instead of the
# default JSON encoding.
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, NoIndent) or isinstance(obj, ChunkedIndent):
return obj.encode()
else:
return json.JSONEncoder.default(self, obj)
def executeRegexHacks(output_string):
# turn strings of arrays into arrays (remove the double quotes)
output_string = re.sub(':\s*\"(\[.*\])\"', r': \1', output_string)
output_string = re.sub('(\n\s*)\"(\[.*\])\"', r'\1\2', output_string)
output_string = re.sub('(\n\s*)\"{CHUNK}(.*)\"', r'\1\2', output_string)
# replace '0metadata' with metadata
output_string = re.sub('0metadata', r'metadata', output_string)
# replace 'zchildren' with children
output_string = re.sub('zchildren', r'children', output_string)
# add an extra newline after '"children": {'
output_string = re.sub('(children.*{\s*\n)', r'\1\n', output_string)
# add an extra newline after '},'
output_string = re.sub('},\s*\n', r'},\n\n', output_string)
# add an extra newline after '\n\s*],'
output_string = re.sub('(\n\s*)],\s*\n', r'\1],\n\n', output_string)
return output_string
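# Illustrative sketch (editorial addition, not part of the original converter):
# how the pretty-print pieces above combine. CustomEncoder serializes NoIndent
# values as quoted strings, then executeRegexHacks strips the quotes and renames
# the '0metadata' sort-order hack back to 'metadata'.
def _pretty_print_example():
    data = {'0metadata': {'vertices': 3}, 'vertices': NoIndent([0, 1, 2])}
    text = json.dumps(data, indent=4, cls=CustomEncoder, sort_keys=True)
    return executeRegexHacks(text)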
# #####################################################
# Object Serializers
# #####################################################
# FbxVector2 is not JSON serializable
def serializeVector2(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5))
if option_pretty_print:
return NoIndent([v[0], v[1]], ', ')
else:
return [v[0], v[1]]
# FbxVector3 is not JSON serializable
def serializeVector3(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2]], ', ')
else:
return [v[0], v[1], v[2]]
# FbxVector4 is not JSON serializable
def serializeVector4(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if math.isnan(v[3]) or math.isinf(v[3]):
v[3] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5), round(v[3], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2], v[3]], ', ')
else:
return [v[0], v[1], v[2], v[3]]
# #####################################################
# Helpers
# #####################################################
def getRadians(v):
return ((v[0]*math.pi)/180, (v[1]*math.pi)/180, (v[2]*math.pi)/180)
def getHex(c):
color = (int(c[0]*255) << 16) + (int(c[1]*255) << 8) + int(c[2]*255)
return int(color)
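# e.g. getHex((1.0, 0.5, 0.0)) == 16744192 == 0xFF7F00; inputs are assumed to be
# normalized 0..1 floats, as the FBX color properties return.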
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
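# e.g. setBit(0, 1, True) == 2 and setBit(3, 0, False) == 2; used below to build
# the per-face bitmask written into the Three.js JSON face stream.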
def generate_uvs(uv_layers):
layers = []
for uvs in uv_layers:
tmp = []
for uv in uvs:
tmp.append(uv[0])
tmp.append(uv[1])
if option_pretty_print:
layer = ChunkedIndent(tmp)
else:
layer = tmp
layers.append(layer)
return layers
# #####################################################
# Object Name Helpers
# #####################################################
def hasUniqueName(o, class_id):
scene = o.GetScene()
object_name = o.GetName()
object_id = o.GetUniqueID()
object_count = scene.GetSrcObjectCount(class_id)
for i in range(object_count):
other = scene.GetSrcObject(class_id, i)
other_id = other.GetUniqueID()
other_name = other.GetName()
if other_id == object_id:
continue
if other_name == object_name:
return False
return True
def getObjectName(o, force_prefix = False):
if not o:
return ""
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxNode.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Object_%s_" % object_id
return prefix + object_name
def getMaterialName(o, force_prefix = False):
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxSurfaceMaterial.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Material_%s_" % object_id
return prefix + object_name
def getTextureName(t, force_prefix = False):
if type(t) is FbxFileTexture:
texture_file = t.GetFileName()
texture_id = os.path.splitext(os.path.basename(texture_file))[0]
else:
texture_id = t.GetName()
if texture_id == "_empty_":
texture_id = ""
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % t.GetUniqueID()
if len(texture_id) == 0:
prefix = prefix[0:len(prefix)-1]
return prefix + texture_id
def getMtlTextureName(texture_name, texture_id, force_prefix = False):
texture_name = os.path.splitext(texture_name)[0]
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % texture_id
return prefix + texture_name
def getPrefixedName(o, prefix):
return (prefix + '_%s_') % o.GetUniqueID() + o.GetName()
# #####################################################
# Triangulation
# #####################################################
def triangulate_node_hierarchy(node):
    node_attribute = node.GetNodeAttribute()
if node_attribute:
if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
            converter.TriangulateInPlace(node)
child_count = node.GetChildCount()
for i in range(child_count):
triangulate_node_hierarchy(node.GetChild(i))
def triangulate_scene(scene):
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
triangulate_node_hierarchy(node.GetChild(i))
# #####################################################
# Generate Material Object
# #####################################################
def generate_texture_bindings(material_property, material_params):
# FBX to Three.js texture types
binding_types = {
"DiffuseColor": "map",
"DiffuseFactor": "diffuseFactor",
"EmissiveColor": "emissiveMap",
"EmissiveFactor": "emissiveFactor",
"AmbientColor": "lightMap", # "ambientMap",
"AmbientFactor": "ambientFactor",
"SpecularColor": "specularMap",
"SpecularFactor": "specularFactor",
"ShininessExponent": "shininessExponent",
"NormalMap": "normalMap",
"Bump": "bumpMap",
"TransparentColor": "transparentMap",
"TransparencyFactor": "transparentFactor",
"ReflectionColor": "reflectionMap",
"ReflectionFactor": "reflectionFactor",
"DisplacementColor": "displacementMap",
"VectorDisplacementColor": "vectorDisplacementMap"
}
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
def generate_material_object(material):
#Get the implementation to see if it's a hardware shader.
implementation = GetImplementation(material, "ImplementationHLSL")
implementation_type = "HLSL"
if not implementation:
implementation = GetImplementation(material, "ImplementationCGFX")
implementation_type = "CGFX"
output = None
material_params = None
material_type = None
if implementation:
print("Shader materials are not supported")
elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
transparent = False
reflectivity = 1
material_type = 'MeshBasicMaterial'
# material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
specular = getHex(material.Specular.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
shininess = material.Shininess.Get()
transparent = False
reflectivity = 1
bumpScale = 1
material_type = 'MeshPhongMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'specular' : specular,
'shininess' : shininess,
'bumpScale' : bumpScale,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
else:
        print("Unknown type of Material: %s" % getMaterialName(material))
    # default to Lambert Material if the current Material type cannot be handled
if not material_type:
ambient = getHex((0,0,0))
diffuse = getHex((0.5,0.5,0.5))
emissive = getHex((0,0,0))
opacity = 1
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
if option_textures:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
generate_texture_bindings(material_property, material_params)
material_params['wireframe'] = False
material_params['wireframeLinewidth'] = 1
output = {
'type' : material_type,
'parameters' : material_params
}
return output
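# For reference (editorial note, values purely illustrative): a Phong material
# comes out of generate_material_object() roughly as
#   { 'type': 'MeshPhongMaterial',
#     'parameters': { 'color': 8421504, 'specular': 3355443, 'shininess': 20.0,
#                     'opacity': 1.0, 'transparent': False, 'map': 'Texture_12_diffuse', ... } }
# with texture bindings such as 'map' added only when option_textures is enabled.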
def generate_proxy_material_object(node, material_names):
material_type = 'MultiMaterial'
material_params = {
'materials' : material_names
}
output = {
'type' : material_type,
'parameters' : material_params
}
return output
# #####################################################
# Find Scene Materials
# #####################################################
def extract_materials_from_node(node, material_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
node = None
if mesh:
node = mesh.GetNode()
if node:
material_count = node.GetMaterialCount()
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append(getMaterialName(material))
if material_count > 1:
proxy_material = generate_proxy_material_object(node, material_names)
proxy_name = getMaterialName(node, True)
material_dict[proxy_name] = proxy_material
def generate_materials_from_hierarchy(node, material_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_materials_from_node(node, material_dict)
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
def generate_material_dict(scene):
material_dict = {}
# generate all materials for this scene
material_count = scene.GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for i in range(material_count):
material = scene.GetSrcObject(FbxSurfaceMaterial.ClassId, i)
material_object = generate_material_object(material)
material_name = getMaterialName(material)
material_dict[material_name] = material_object
    # generate material proxies
    # Three.js does not support meshes with multiple materials, however it does
    # support materials with multiple submaterials
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
return material_dict
# #####################################################
# Generate Texture Object
# #####################################################
def generate_texture_object(texture):
#TODO: extract more texture properties
wrap_u = texture.GetWrapModeU()
wrap_v = texture.GetWrapModeV()
offset = texture.GetUVTranslation()
if type(texture) is FbxFileTexture:
url = texture.GetFileName()
else:
url = getTextureName( texture )
#url = replace_inFolder2OutFolder( url )
#print( url )
index = url.rfind( '/' )
if index == -1:
index = url.rfind( '\\' )
filename = url[ index+1 : len(url) ]
output = {
'url': filename,
'fullpath': url,
'repeat': serializeVector2( (1,1) ),
'offset': serializeVector2( texture.GetUVTranslation() ),
'magFilter': 'LinearFilter',
'minFilter': 'LinearMipMapLinearFilter',
'anisotropy': True
}
return output
# #####################################################
# Replace Texture input path to output
# #####################################################
def replace_inFolder2OutFolder(url):
folderIndex = url.find(inputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(inputFolder): ]
url = outputFolder + url
return url
# #####################################################
# Replace Texture output path to input
# #####################################################
def replace_OutFolder2inFolder(url):
folderIndex = url.find(outputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(outputFolder): ]
url = inputFolder + url
return url
# #####################################################
# Find Scene Textures
# #####################################################
def extract_material_textures(material_property, texture_dict):
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
def extract_textures_from_node(node, texture_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
#for all materials attached to this mesh
material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for material_index in range(material_count):
material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
#go through all the possible textures types
if material:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
extract_material_textures(material_property, texture_dict)
def generate_textures_from_hierarchy(node, texture_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_textures_from_node(node, texture_dict)
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
def generate_texture_dict(scene):
if not option_textures:
return {}
texture_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
return texture_dict
# #####################################################
# Extract Fbx SDK Mesh Data
# #####################################################
def extract_fbx_vertex_positions(mesh):
control_points_count = mesh.GetControlPointsCount()
control_points = mesh.GetControlPoints()
positions = []
for i in range(control_points_count):
tmp = control_points[i]
tmp = [tmp[0], tmp[1], tmp[2]]
positions.append(tmp)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
for i in range(len(positions)):
v = positions[i]
position = FbxVector4(v[0], v[1], v[2])
position = transform.MultNormalize(position)
positions[i] = [position[0], position[1], position[2]]
return positions
def extract_fbx_vertex_normals(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_normal_indices = []
layered_normal_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_normals = mesh.GetLayer(l).GetNormals()
if not mesh_normals:
continue
normals_array = mesh_normals.GetDirectArray()
normals_count = normals_array.GetCount()
if normals_count == 0:
continue
normal_indices = []
normal_values = []
# values
for i in range(normals_count):
normal = normals_array.GetAt(i)
normal = [normal[0], normal[1], normal[2]]
normal_values.append(normal)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
t = FbxVector4(0,0,0,1)
transform.SetRow(3, t)
for i in range(len(normal_values)):
n = normal_values[i]
normal = FbxVector4(n[0], n[1], n[2])
normal = transform.MultNormalize(normal)
normal.Normalize()
normal = [normal[0], normal[1], normal[2]]
normal_values[i] = normal
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_normals = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
# mapping mode is by control points. The mesh should be smooth and soft.
# we can get normals by retrieving each control point
if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
# reference mode is direct, the normal index is same as vertex index.
# get normals by the index of control vertex
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(control_point_index)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(control_point_index)
poly_normals.append(index)
# mapping mode is by polygon-vertex.
# we can get normals by retrieving polygon-vertex.
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(vertexId)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(vertexId)
poly_normals.append(index)
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported normal mapping mode for polygon vertex")
vertexId += 1
normal_indices.append(poly_normals)
layered_normal_values.append(normal_values)
layered_normal_indices.append(normal_indices)
normal_values = []
normal_indices = []
# Three.js only supports one layer of normals
if len(layered_normal_values) > 0:
normal_values = layered_normal_values[0]
normal_indices = layered_normal_indices[0]
return normal_values, normal_indices
def extract_fbx_vertex_colors(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_color_indices = []
layered_color_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_colors = mesh.GetLayer(l).GetVertexColors()
if not mesh_colors:
continue
colors_array = mesh_colors.GetDirectArray()
colors_count = colors_array.GetCount()
if colors_count == 0:
continue
color_indices = []
color_values = []
# values
for i in range(colors_count):
color = colors_array.GetAt(i)
color = [color.mRed, color.mGreen, color.mBlue, color.mAlpha]
color_values.append(color)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_colors = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(control_point_index)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(control_point_index)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(vertexId)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(vertexId)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported color mapping mode for polygon vertex")
vertexId += 1
color_indices.append(poly_colors)
layered_color_indices.append( color_indices )
layered_color_values.append( color_values )
color_values = []
color_indices = []
# Three.js only supports one layer of colors
if len(layered_color_values) > 0:
color_values = layered_color_values[0]
color_indices = layered_color_indices[0]
'''
# The Fbx SDK defaults mesh.Color to (0.8, 0.8, 0.8)
# This causes most models to receive incorrect vertex colors
if len(color_values) == 0:
color = mesh.Color.Get()
color_values = [[color[0], color[1], color[2]]]
color_indices = []
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
color_indices.append([0] * poly_size)
'''
return color_values, color_indices
def extract_fbx_vertex_uvs(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_uv_indices = []
layered_uv_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_uvs = mesh.GetLayer(l).GetUVs()
if not mesh_uvs:
continue
uvs_array = mesh_uvs.GetDirectArray()
uvs_count = uvs_array.GetCount()
if uvs_count == 0:
continue
uv_indices = []
uv_values = []
# values
for i in range(uvs_count):
uv = uvs_array.GetAt(i)
uv = [uv[0], uv[1]]
uv_values.append(uv)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_uvs = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
poly_uvs.append(control_point_index)
elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
poly_uvs.append(index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
uv_texture_index = mesh_uvs.GetIndexArray().GetAt(vertexId)
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
poly_uvs.append(uv_texture_index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported uv mapping mode for polygon vertex")
vertexId += 1
uv_indices.append(poly_uvs)
layered_uv_values.append(uv_values)
layered_uv_indices.append(uv_indices)
return layered_uv_values, layered_uv_indices
# #####################################################
# Process Mesh Geometry
# #####################################################
def generate_normal_key(normal):
return (round(normal[0], 6), round(normal[1], 6), round(normal[2], 6))
def generate_color_key(color):
return getHex(color)
def generate_uv_key(uv):
return (round(uv[0], 6), round(uv[1], 6))
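# These rounded tuples are the dictionary keys used for de-duplication below:
# values that agree to six decimal places collapse to a single entry, e.g.
# generate_uv_key((0.1234564, 0.75)) == generate_uv_key((0.1234562, 0.75)) == (0.123456, 0.75)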
def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
source_layer_count = len(source_uvs)
for layer_index in range(source_layer_count):
dest_layer_count = len(dest_uvs)
if dest_layer_count <= layer_index:
dest_uv_layer = {}
count = 0
dest_uvs.append(dest_uv_layer)
counts.append(count)
else:
dest_uv_layer = dest_uvs[layer_index]
count = counts[layer_index]
source_uv_layer = source_uvs[layer_index]
for uv in source_uv_layer:
key = generate_uv_key(uv)
if key not in dest_uv_layer:
dest_uv_layer[key] = count
count += 1
counts[layer_index] = count
return counts
def generate_unique_normals_dictionary(mesh_list):
normals_dictionary = {}
nnormals = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
node = mesh.GetNode()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
if len(normal_values) > 0:
for normal in normal_values:
key = generate_normal_key(normal)
if key not in normals_dictionary:
normals_dictionary[key] = nnormals
nnormals += 1
return normals_dictionary
def generate_unique_colors_dictionary(mesh_list):
colors_dictionary = {}
ncolors = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
color_values, color_indices = extract_fbx_vertex_colors(mesh)
if len(color_values) > 0:
for color in color_values:
key = generate_color_key(color)
if key not in colors_dictionary:
colors_dictionary[key] = ncolors
ncolors += 1
return colors_dictionary
def generate_unique_uvs_dictionary_layers(mesh_list):
uvs_dictionary_layers = []
nuvs_list = []
# Merge meshes, remove duplicate data
for mesh in mesh_list:
uv_values, uv_indices = extract_fbx_vertex_uvs(mesh)
if len(uv_values) > 0:
nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)
return uvs_dictionary_layers
def generate_normals_from_dictionary(normals_dictionary):
normal_values = []
for key, index in sorted(normals_dictionary.items(), key = operator.itemgetter(1)):
normal_values.append(key)
return normal_values
def generate_colors_from_dictionary(colors_dictionary):
color_values = []
for key, index in sorted(colors_dictionary.items(), key = operator.itemgetter(1)):
color_values.append(key)
return color_values
def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
uv_values = []
for uvs_dictionary in uvs_dictionary_layers:
uv_values_layer = []
for key, index in sorted(uvs_dictionary.items(), key = operator.itemgetter(1)):
uv_values_layer.append(key)
uv_values.append(uv_values_layer)
return uv_values
def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
if len(mesh_normal_indices) <= 0:
return []
poly_normal_indices = mesh_normal_indices[poly_index]
poly_size = len(poly_normal_indices)
output_poly_normal_indices = []
for v in range(poly_size):
normal_index = poly_normal_indices[v]
normal_value = mesh_normal_values[normal_index]
key = generate_normal_key(normal_value)
output_index = normals_to_indices[key]
output_poly_normal_indices.append(output_index)
return output_poly_normal_indices
def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
if len(mesh_color_indices) <= 0:
return []
poly_color_indices = mesh_color_indices[poly_index]
poly_size = len(poly_color_indices)
output_poly_color_indices = []
for v in range(poly_size):
color_index = poly_color_indices[v]
color_value = mesh_color_values[color_index]
key = generate_color_key(color_value)
output_index = colors_to_indices[key]
output_poly_color_indices.append(output_index)
return output_poly_color_indices
def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
if len(mesh_uv_indices) <= 0:
return []
poly_uv_indices = mesh_uv_indices[poly_index]
poly_size = len(poly_uv_indices)
output_poly_uv_indices = []
for v in range(poly_size):
uv_index = poly_uv_indices[v]
uv_value = mesh_uv_values[uv_index]
key = generate_uv_key(uv_value)
output_index = uvs_to_indices[key]
output_poly_uv_indices.append(output_index)
return output_poly_uv_indices
def process_mesh_vertices(mesh_list):
vertex_offset = 0
vertex_offset_list = [0]
vertices = []
for mesh in mesh_list:
node = mesh.GetNode()
mesh_vertices = extract_fbx_vertex_positions(mesh)
vertices.extend(mesh_vertices[:])
vertex_offset += len(mesh_vertices)
vertex_offset_list.append(vertex_offset)
return vertices, vertex_offset_list
def process_mesh_materials(mesh_list):
material_offset = 0
material_offset_list = [0]
materials_list = []
#TODO: remove duplicate mesh references
for mesh in mesh_list:
node = mesh.GetNode()
material_count = node.GetMaterialCount()
if material_count > 0:
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
materials_list.append( material )
material_offset += material_count
material_offset_list.append(material_offset)
return materials_list, material_offset_list
def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
faces = []
for mesh_index in range(len(mesh_list)):
mesh = mesh_list[mesh_index]
flipWindingOrder = False
node = mesh.GetNode()
if node:
local_scale = node.EvaluateLocalScaling()
if local_scale[0] < 0 or local_scale[1] < 0 or local_scale[2] < 0:
flipWindingOrder = True
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
color_values, color_indices = extract_fbx_vertex_colors(mesh)
uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
for poly_index in range(poly_count):
poly_size = mesh.GetPolygonSize(poly_index)
face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
face_uv_layers = []
for l in range(len(uv_indices_layers)):
uv_values = uv_values_layers[l]
uv_indices = uv_indices_layers[l]
face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
face_uv_layers.append(face_uv_indices)
face_vertices = []
for vertex_index in range(poly_size):
control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
face_vertices.append(control_point_index)
#TODO: assign a default material to any mesh without one
if len(material_offset_list) <= mesh_index:
material_offset = 0
else:
material_offset = material_offset_list[mesh_index]
vertex_offset = vertex_offset_list[mesh_index]
if poly_size > 4:
new_face_normals = []
new_face_colors = []
new_face_uv_layers = []
for i in range(poly_size - 2):
new_face_vertices = [face_vertices[0], face_vertices[i+1], face_vertices[i+2]]
if len(face_normals):
new_face_normals = [face_normals[0], face_normals[i+1], face_normals[i+2]]
if len(face_colors):
new_face_colors = [face_colors[0], face_colors[i+1], face_colors[i+2]]
if len(face_uv_layers):
new_face_uv_layers = []
for layer in face_uv_layers:
new_face_uv_layers.append([layer[0], layer[i+1], layer[i+2]])
face = generate_mesh_face(mesh,
poly_index,
new_face_vertices,
new_face_normals,
new_face_colors,
new_face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
else:
face = generate_mesh_face(mesh,
poly_index,
face_vertices,
face_normals,
face_colors,
face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
return faces
def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset, flipOrder):
isTriangle = ( len(vertex_indices) == 3 )
nVertices = 3 if isTriangle else 4
hasMaterial = False
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
hasMaterial = True
break
hasFaceUvs = False
hasFaceVertexUvs = len(uv_layers) > 0
hasFaceNormals = False
hasFaceVertexNormals = len(normals) > 0
hasFaceColors = False
hasFaceVertexColors = len(colors) > 0
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
    # order is important, must match order in JSONLoader
    # face type
    # vertex indices
    # material index
    # face uvs index
    # face vertex uvs indices
    # face normal index
    # face vertex normals indices
    # face color index
    # face vertex colors indices
faceData.append(faceType)
if flipOrder:
if nVertices == 3:
vertex_indices = [vertex_indices[0], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
tmp.append([polygon_uvs[0], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
else:
vertex_indices = [vertex_indices[0], vertex_indices[3], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[3], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[3], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
                    tmp.append([polygon_uvs[0], polygon_uvs[3], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
for i in range(nVertices):
index = vertex_indices[i] + vertex_offset
faceData.append(index)
if hasMaterial:
material_id = 0
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
material_id = materials.GetIndexArray().GetAt(polygon_index)
break
material_id += material_offset
faceData.append( material_id )
if hasFaceVertexUvs:
for polygon_uvs in uv_layers:
for i in range(nVertices):
index = polygon_uvs[i]
faceData.append(index)
if hasFaceVertexNormals:
for i in range(nVertices):
index = normals[i]
faceData.append(index)
if hasFaceVertexColors:
for i in range(nVertices):
index = colors[i]
faceData.append(index)
return faceData
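# Worked example (editorial note, assuming a single uv layer): a triangle that
# has a material, per-vertex uvs and per-vertex normals sets bits 1, 3 and 5, so
# faceType == 0b00101010 == 42 and the emitted row reads
#   [42, v0, v1, v2, materialIndex, uv0, uv1, uv2, n0, n1, n2]
# matching the append order above.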
# #####################################################
# Generate Mesh Object (for scene output format)
# #####################################################
def generate_scene_output(node):
mesh = node.GetNodeAttribute()
# This is done in order to keep the scene output and non-scene output code DRY
mesh_list = [ mesh ]
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable automatic json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = ChunkedIndent(vertices, 15, True)
normal_values = ChunkedIndent(normal_values, 15, True)
color_values = ChunkedIndent(color_values, 15)
faces = ChunkedIndent(faces, 30)
metadata = {
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Mesh Object (for non-scene output)
# #####################################################
def generate_non_scene_output(scene):
mesh_list = generate_mesh_list(scene)
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = NoIndent(vertices)
normal_values = NoIndent(normal_values)
color_values = NoIndent(color_values)
faces = NoIndent(faces)
metadata = {
'formatVersion' : 3,
'type' : 'geometry',
'generatedBy' : 'convert-to-threejs.py',
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
def generate_mesh_list_from_hierarchy(node, mesh_list):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
mesh_list.append(node.GetNodeAttribute())
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
def generate_mesh_list(scene):
mesh_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
return mesh_list
# #####################################################
# Generate Embed Objects
# #####################################################
def generate_embed_dict_from_hierarchy(node, embed_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
embed_object = generate_scene_output(node)
embed_name = getPrefixedName(node, 'Embed')
embed_dict[embed_name] = embed_object
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
def generate_embed_dict(scene):
embed_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
return embed_dict
# #####################################################
# Generate Geometry Objects
# #####################################################
def generate_geometry_object(node):
output = {
'type' : 'embedded',
'id' : getPrefixedName( node, 'Embed' )
}
return output
def generate_geometry_dict_from_hierarchy(node, geometry_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
geometry_object = generate_geometry_object(node)
geometry_name = getPrefixedName( node, 'Geometry' )
geometry_dict[geometry_name] = geometry_object
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
def generate_geometry_dict(scene):
geometry_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
return geometry_dict
# #####################################################
# Generate Light Node Objects
# #####################################################
def generate_default_light():
direction = (1,1,1)
color = (1,1,1)
intensity = 80.0
output = {
'type': 'DirectionalLight',
'color': getHex(color),
'intensity': intensity/100.00,
'direction': serializeVector3( direction ),
'target': getObjectName( None )
}
return output
def generate_light_object(node):
light = node.GetNodeAttribute()
light_types = ["point", "directional", "spot", "area", "volume"]
light_type = light_types[light.LightType.Get()]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
output = None
if light_type == "directional":
# Three.js directional lights emit light from a point in 3d space to a target node or the origin.
# When there is no target, we need to take a point, one unit away from the origin, and move it
# into the right location so that the origin acts like the target
if node.GetTarget():
direction = position
else:
translation = FbxVector4(0,0,0,0)
scale = FbxVector4(1,1,1,1)
rotation = transform.GetR()
matrix = FbxMatrix(translation, rotation, scale)
direction = matrix.MultNormalize(FbxVector4(0,1,0,1))
output = {
'type': 'DirectionalLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'direction': serializeVector3( direction ),
'target': getObjectName( node.GetTarget() )
}
elif light_type == "point":
output = {
'type': 'PointLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get()
}
elif light_type == "spot":
output = {
'type': 'SpotLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get(),
'angle': light.OuterAngle.Get()*math.pi/180,
'exponent': light.DecayType.Get(),
'target': getObjectName( node.GetTarget() )
}
return output
def generate_ambient_light(scene):
scene_settings = scene.GetGlobalSettings()
ambient_color = scene_settings.GetAmbientColor()
ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
return None
output = {
'type': 'AmbientLight',
'color': getHex(ambient_color)
}
return output
# #####################################################
# Generate Camera Node Objects
# #####################################################
def generate_default_camera():
position = (100, 100, 100)
near = 0.1
far = 1000
fov = 75
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
def generate_camera_object(node):
camera = node.GetNodeAttribute()
position = camera.Position.Get()
projection_types = [ "perspective", "orthogonal" ]
projection = projection_types[camera.ProjectionType.Get()]
near = camera.NearPlane.Get()
far = camera.FarPlane.Get()
name = getObjectName( node )
output = {}
if projection == "perspective":
aspect = camera.PixelAspectRatio.Get()
fov = camera.FieldOfView.Get()
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'aspect': aspect,
'near': near,
'far': far,
'position': serializeVector3( position )
}
elif projection == "orthogonal":
left = ""
right = ""
top = ""
bottom = ""
output = {
'type': 'OrthographicCamera',
'left': left,
'right': right,
'top': top,
'bottom': bottom,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
# #####################################################
# Generate Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
if node.GetNodeAttribute() is None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eCamera:
camera_string = getObjectName(node)
camera_list.append(camera_string)
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
def generate_camera_name_list(scene):
camera_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
return camera_list
# #####################################################
# Generate Mesh Node Object
# #####################################################
def generate_mesh_object(node):
mesh = node.GetNodeAttribute()
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
material_count = node.GetMaterialCount()
material_name = ""
if material_count > 0:
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append( getMaterialName(material) )
if material_count <= 1 and len(material_names) == 0:
material_names.append('')
#If this mesh has more than one material, use a proxy material
material_name = getMaterialName( node, True) if material_count > 1 else material_names[0]
output = {
'geometry': getPrefixedName( node, 'Geometry' ),
'material': material_name,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True,
}
return output
# #####################################################
# Generate Node Object
# #####################################################
def generate_object(node):
node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
"CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
"TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
node_type = ""
if node.GetNodeAttribute() is None:
node_type = "Null"
else:
node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
name = getObjectName( node )
output = {
'fbx_type': node_type,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True
}
return output
# #####################################################
# Parse Scene Node Objects
# #####################################################
def generate_object_hierarchy(node, object_dict):
object_count = 0
if node.GetNodeAttribute() is None:
object_data = generate_object(node)
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
object_data = generate_mesh_object(node)
elif attribute_type == FbxNodeAttribute.eLight:
object_data = generate_light_object(node)
elif attribute_type == FbxNodeAttribute.eCamera:
object_data = generate_camera_object(node)
else:
object_data = generate_object(node)
object_count += 1
object_name = getObjectName(node)
object_children = {}
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_children)
if node.GetChildCount() > 0:
# Having 'children' above other attributes is hard to read.
# We can send it to the bottom using the last letter of the alphabet 'z'.
# This letter is removed from the final output.
if option_pretty_print:
object_data['zchildren'] = object_children
else:
object_data['children'] = object_children
object_dict[object_name] = object_data
return object_count
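# Illustrative shape of the dict built above (object names are invented examples).
# With option_pretty_print the children live under 'zchildren' until the later
# regex pass strips the leading 'z':
#
#   {
#       "Cube": {
#           "geometry": "<prefixed geometry id>",
#           "material": "<material name>",
#           "position": [...], "quaternion": [...], "scale": [...],
#           "visible": True,
#           "zchildren": { "Lamp": { ... } }
#       }
#   }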
def generate_scene_objects(scene):
object_count = 0
object_dict = {}
ambient_light = generate_ambient_light(scene)
if ambient_light:
object_dict['AmbientLight'] = ambient_light
object_count += 1
if option_default_light:
default_light = generate_default_light()
object_dict['DefaultLight'] = default_light
object_count += 1
if option_default_camera:
default_camera = generate_default_camera()
object_dict['DefaultCamera'] = default_camera
object_count += 1
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_dict)
return object_dict, object_count
# #####################################################
# Generate Scene Output
# #####################################################
def extract_scene(scene, filename):
global_settings = scene.GetGlobalSettings()
objects, nobjects = generate_scene_objects(scene)
textures = generate_texture_dict(scene)
materials = generate_material_dict(scene)
geometries = generate_geometry_dict(scene)
embeds = generate_embed_dict(scene)
ntextures = len(textures)
nmaterials = len(materials)
ngeometries = len(geometries)
position = serializeVector3( (0,0,0) )
rotation = serializeVector3( (0,0,0) )
scale = serializeVector3( (1,1,1) )
camera_names = generate_camera_name_list(scene)
scene_settings = scene.GetGlobalSettings()
# This does not seem to be of any help here
# global_settings.GetDefaultCamera()
defcamera = camera_names[0] if len(camera_names) > 0 else ""
if option_default_camera:
defcamera = 'default_camera'
metadata = {
'formatVersion': 3.2,
'type': 'scene',
'generatedBy': 'convert-to-threejs.py',
'objects': nobjects,
'geometries': ngeometries,
'materials': nmaterials,
'textures': ntextures
}
transform = {
'position' : position,
'rotation' : rotation,
'scale' : scale
}
defaults = {
'bgcolor' : 0,
'camera' : defcamera,
'fog' : ''
}
output = {
'objects': objects,
'geometries': geometries,
'materials': materials,
'textures': textures,
'embeds': embeds,
'transform': transform,
'defaults': defaults,
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
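# Illustrative top-level layout of the dict returned above (keys mirror the code,
# values elided):
#   {
#       "metadata" / "0metadata": {"formatVersion": 3.2, "type": "scene", ...},
#       "objects": {...}, "geometries": {...}, "materials": {...},
#       "textures": {...}, "embeds": {...},
#       "transform": {"position": ..., "rotation": ..., "scale": ...},
#       "defaults": {"bgcolor": 0, "camera": "...", "fog": ""}
#   }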
# #####################################################
# Generate Non-Scene Output
# #####################################################
def extract_geometry(scene, filename):
output = generate_non_scene_output(scene)
return output
# #####################################################
# File Helpers
# #####################################################
def write_file(filepath, content):
index = filepath.rfind('/')
dir = filepath[0:index]
#if not os.path.exists(dir):
#os.makedirs(dir)
out = open(filepath, "w")
out.write(content.encode('utf8', 'replace'))
out.close()
def read_file(filepath):
f = open(filepath)
content = f.readlines()
f.close()
return content
def copy_textures(textures):
texture_dict = {}
for key in textures:
url = textures[key]['fullpath']
#src = replace_OutFolder2inFolder(url)
#print( src )
#print( url )
if url in texture_dict: # texture has been copied
continue
if not os.path.exists(url):
print("copy_texture error: we can't find this texture at " + url)
continue
try:
index = url.rfind('/')
if index == -1:
index = url.rfind( '\\' )
filename = url[index+1:len(url)]
saveFolder = "maps"
saveFilename = saveFolder + "/" + filename
#print( src )
#print( url )
#print( saveFilename )
if not os.path.exists(saveFolder):
os.makedirs(saveFolder)
shutil.copyfile(url, saveFilename)
texture_dict[url] = True
except IOError as e:
print "I/O error({0}): {1} {2}".format(e.errno, e.strerror, url)
def findFilesWithExt(directory, ext, include_path = True):
ext = ext.lower()
found = []
for root, dirs, files in os.walk(directory):
for filename in files:
current_ext = os.path.splitext(filename)[1].lower()
if current_ext == ext:
if include_path:
found.append(os.path.join(root, filename))
else:
found.append(filename)
return found
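# Illustrative usage: findFilesWithExt('textures', '.png') returns full paths,
# while findFilesWithExt('textures', '.png', include_path=False) returns just the
# file names.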
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
from optparse import OptionParser
try:
from FbxCommon import *
except ImportError:
import platform
msg = 'Could not locate the python FBX SDK!\n'
msg += 'You need to copy the FBX SDK into your python install folder such as '
if platform.system() == 'Windows' or platform.system() == 'Microsoft':
msg += '"Python26/Lib/site-packages"'
elif platform.system() == 'Linux':
msg += '"/usr/local/lib/python2.6/site-packages"'
elif platform.system() == 'Darwin':
msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
msg += ' folder.'
print(msg)
sys.exit(1)
usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
parser = OptionParser(usage=usage)
parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
parser.add_option('-x', '--ignore-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
parser.add_option('-n', '--no-texture-copy', action='store_true', dest='notexturecopy', help="don't copy texture files", default=False)
parser.add_option('-u', '--force-prefix', action='store_true', dest='prefix', help="prefix all object names in output file to ensure uniqueness", default=False)
parser.add_option('-f', '--flatten-scene', action='store_true', dest='geometry', help="merge all geometries and apply node transforms", default=False)
parser.add_option('-y', '--force-y-up', action='store_true', dest='forceyup', help="ensure that the y axis shows up", default=False)
parser.add_option('-c', '--add-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
parser.add_option('-l', '--add-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
parser.add_option('-p', '--pretty-print', action='store_true', dest='pretty', help="pretty print the output JSON file", default=False)
(options, args) = parser.parse_args()
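# Illustrative invocation (file names are placeholders):
#   python convert-to-threejs.py scene.fbx scene.js -t -p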
option_triangulate = options.triangulate
option_textures = not options.notextures
option_copy_textures = not options.notexturecopy
option_prefix = options.prefix
option_geometry = options.geometry
option_forced_y_up = options.forceyup
option_default_camera = options.defcamera
option_default_light = options.deflight
option_pretty_print = options.pretty
# Prepare the FBX SDK.
sdk_manager, scene = InitializeSdkObjects()
converter = FbxGeometryConverter(sdk_manager)
# The converter takes an FBX file as an argument.
if len(args) > 1:
print("\nLoading file: %s" % args[0])
result = LoadScene(sdk_manager, scene, args[0])
else:
result = False
print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
if not result:
print("\nAn error occurred while loading the file...")
else:
if option_triangulate:
print("\nForcing geometry to triangles")
triangulate_scene(scene)
axis_system = FbxAxisSystem.MayaYUp
if not option_forced_y_up:
# Convert the scene according to the asset's own up-axis setting
upVector = scene.GetGlobalSettings().GetAxisSystem().GetUpVector()
if upVector[0] == 3:
axis_system = FbxAxisSystem.MayaZUp
axis_system.ConvertScene(scene)
inputFolder = args[0].replace( "\\", "/" )
index = args[0].rfind( "/" )
inputFolder = inputFolder[:index]
outputFolder = args[1].replace( "\\", "/" )
index = args[1].rfind( "/" )
outputFolder = outputFolder[:index]
if option_geometry:
output_content = extract_geometry(scene, os.path.basename(args[0]))
else:
output_content = extract_scene(scene, os.path.basename(args[0]))
if option_pretty_print:
output_string = json.dumps(output_content, indent=4, cls=CustomEncoder, separators=(',', ': '), sort_keys=True)
output_string = executeRegexHacks(output_string)
else:
output_string = json.dumps(output_content, separators=(',', ': '), sort_keys=True)
output_path = os.path.join(os.getcwd(), args[1])
write_file(output_path, output_string)
if option_copy_textures:
copy_textures( output_content['textures'] )
print("\nExported Three.js file to:\n%s\n" % output_path)
# Destroy all objects created by the FBX SDK.
sdk_manager.Destroy()
sys.exit(0)
| mit |
instantchow/home-assistant | homeassistant/components/sensor/steam_online.py | 12 | 2181 | """
Sensor for Steam account status.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.steam_online/
"""
from homeassistant.helpers.entity import Entity
from homeassistant.const import CONF_API_KEY
ICON = 'mdi:steam'
REQUIREMENTS = ['steamodd==4.21']
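# Illustrative configuration.yaml entry (key names follow the config lookups in
# setup_platform below; the account id is a placeholder):
#
# sensor:
#   - platform: steam_online
#     api_key: YOUR_STEAM_API_KEY
#     accounts:
#       - 76561198012345678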
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Steam platform."""
import steam as steamod
steamod.api.key.set(config.get(CONF_API_KEY))
add_devices(
[SteamSensor(account,
steamod) for account in config.get('accounts', [])])
class SteamSensor(Entity):
"""A class for the Steam account."""
# pylint: disable=abstract-method
def __init__(self, account, steamod):
"""Initialize the sensor."""
self._steamod = steamod
self._account = account
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._profile.persona
@property
def entity_id(self):
"""Return the entity ID."""
return 'sensor.steam_{}'.format(self._account)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
# pylint: disable=no-member
def update(self):
"""Update device state."""
self._profile = self._steamod.user.profile(self._account)
if self._profile.current_game[2] is None:
self._game = 'None'
else:
self._game = self._profile.current_game[2]
self._state = {
1: 'Online',
2: 'Busy',
3: 'Away',
4: 'Snooze',
5: 'Trade',
6: 'Play',
}.get(self._profile.status, 'Offline')
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {'Game': self._game}
@property
def entity_picture(self):
"""Avatar of the account."""
return self._profile.avatar_medium
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
| mit |
wujinjun/TFbook | chapter5/models-master/slim/nets/inception_v2_test.py | 26 | 10873 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import inception
slim = tf.contrib.slim
class InceptionV2Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
mixed_5c, end_points = inception.inception_v2_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a',
'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_5a', 'Mixed_5b', 'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v2_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV2/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Mixed_3b': [batch_size, 28, 28, 256],
'Mixed_3c': [batch_size, 28, 28, 320],
'Mixed_4a': [batch_size, 14, 14, 576],
'Mixed_4b': [batch_size, 14, 14, 576],
'Mixed_4c': [batch_size, 14, 14, 576],
'Mixed_4d': [batch_size, 14, 14, 576],
'Mixed_4e': [batch_size, 14, 14, 576],
'Mixed_5a': [batch_size, 7, 7, 1024],
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024],
'Conv2d_1a_7x7': [batch_size, 112, 112, 64],
'MaxPool_2a_3x3': [batch_size, 56, 56, 64],
'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
'MaxPool_3a_3x3': [batch_size, 28, 28, 192]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v2_arg_scope()):
inception.inception_v2_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(10173112, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v2(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v2(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception.inception_v2(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v2(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
logits, _ = inception.inception_v2(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
tf.test.main()
| gpl-3.0 |
maxfierke/android_kernel_samsung_aries | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
tareqalam/plone.formbuilder | src/plone/formbuilder/services/add.py | 1 | 2845 | # -*- coding: utf-8 -*-
from plone.restapi.deserializer import json_body
from plone.restapi.exceptions import DeserializationError
from plone.restapi.interfaces import IDeserializeFromJson
from plone.restapi.interfaces import ISerializeToJson
from plone.restapi.services import Service
from plone.restapi.services.content.utils import create
from plone.restapi.services.content.utils import rename
from zExceptions import BadRequest
from zope.component import queryMultiAdapter
from zope.interface import alsoProvides
import plone.protect.interfaces
import datetime
class SchemaFormDataPost(Service):
"""Creates a new content object.
"""
def reply(self):
data = json_body(self.request)
type_ = data.get('@type', 'SchemaFormData')
id_ = data.get('id', None)
title = data.get('title', 'Data at %s' % datetime.datetime.now().strftime('%Y-%m-%d %H%M%S'))
# import pdb;pdb.set_trace()
schema_form_data = data.get('schema_form_data', {})
if not type_:
raise BadRequest("Property '@type' is required")
# Disable CSRF protection
if 'IDisableCSRFProtection' in dir(plone.protect.interfaces):
alsoProvides(self.request,
plone.protect.interfaces.IDisableCSRFProtection)
obj = create(self.context, type_, id_=id_,
title=title)
if isinstance(obj, dict) and 'error' in obj:
self.request.response.setStatus(400)
return obj
obj.schema_form_data = schema_form_data
# Update fields
# deserializer = queryMultiAdapter((obj, self.request),
# IDeserializeFromJson)
# if deserializer is None:
# self.request.response.setStatus(501)
# return dict(error=dict(
# message='Cannot deserialize type {}'.format(obj.portal_type)))
# try:
# deserializer(validate_all=True)
# except DeserializationError as e:
# self.request.response.setStatus(400)
# return dict(error=dict(
# type='DeserializationError',
# message=str(e)))
# Rename if generated id
if not id_:
rename(obj)
self.request.response.setStatus(201)
self.request.response.setHeader('Location', obj.absolute_url())
# serializer = queryMultiAdapter(
# (obj, self.request),
# ISerializeToJson
# )
# serialized_obj = serializer()
# HypermediaBatch can't determine the correct canonical URL for
# objects that have just been created via POST - so we make sure
# to set it here
# serialized_obj['@id'] = obj.absolute_url()
success = {'message': 'successfully added data'}
return success
| gpl-3.0 |
HiroIshikawa/21playground | microblog/flask/lib/python3.5/site-packages/werkzeug/contrib/limiter.py | 365 | 1334 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those stream request data directly into memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
Default is 10MB
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
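# Illustrative usage (the 5 MB cap is an example value):
#   app = StreamLimitMiddleware(app, maximum_size=1024 * 1024 * 5)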
| mit |
L3K0V/lifebelt | server/api/members/migrations/0002_auto_20151009_2237.py | 1 | 2005 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('courses', '0002_course'),
('members', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('github', models.CharField(max_length=48, blank=True)),
('github_id', models.PositiveIntegerField(blank=True, null=True)),
('github_token', models.CharField(max_length=256, blank=True)),
('avatar_url', models.CharField(max_length=256, blank=True)),
('student_class', models.CharField(max_length=1, blank=True, null=True, choices=[('A', 'A'), ('B', 'B'), ('V', 'V'), ('G', 'G')])),
('student_grade', models.PositiveSmallIntegerField(blank=True, null=True)),
('student_number', models.PositiveSmallIntegerField(blank=True, null=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, related_name='member')),
],
),
migrations.CreateModel(
name='Membership',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('role', models.CharField(max_length=1, default='S', choices=[('S', 'Student'), ('T', 'Teacher')])),
('course', models.ForeignKey(to='courses.Course', related_name='member')),
('member', models.ForeignKey(to='members.Member', related_name='course')),
],
),
migrations.AlterUniqueTogether(
name='membership',
unique_together=set([('course', 'member')]),
),
]
| mit |
mlperf/training_results_v0.7 | Google/benchmarks/bert/implementations/bert-cloud-TF2.0-gpu-v100-8/tf2_common/benchmark/bert_benchmark_utils.py | 2 | 4276 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions or classes shared between BERT benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
# pylint: disable=g-bad-import-order
import REDACTED
import numpy as np
from absl import flags
import tensorflow.compat.v2 as tf
# pylint: enable=g-bad-import-order
from REDACTED.tf2_common.utils.flags import core as flags_core
from REDACTED.tf2_common.utils.testing.perfzero_benchmark import PerfZeroBenchmark
FLAGS = flags.FLAGS
class BenchmarkTimerCallback(tf.keras.callbacks.Callback):
"""Callback that records time it takes to run each batch."""
def __init__(self, num_batches_to_skip=10):
super(BenchmarkTimerCallback, self).__init__()
self.batch_start_times = {}
self.batch_stop_times = {}
def on_batch_begin(self, batch, logs=None):
self.batch_start_times[batch] = time.time()
def on_batch_end(self, batch, logs=None):
self.batch_stop_times[batch] = time.time()
def get_examples_per_sec(self, batch_size, num_batches_to_skip=1):
batch_durations = []
for batch in self.batch_start_times:
if batch in self.batch_stop_times and batch >= num_batches_to_skip:
batch_durations.append(self.batch_stop_times[batch] -
self.batch_start_times[batch])
return batch_size / np.mean(batch_durations)
def get_startup_time(self, program_start_time):
return self.batch_start_times[0] - program_start_time
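# Illustrative wiring (model and dataset names are placeholders): the callback is a
# regular tf.keras callback, so it can be attached to fit() and queried afterwards:
#   timer = BenchmarkTimerCallback()
#   model.fit(train_ds, epochs=1, callbacks=[timer])
#   exp_per_sec = timer.get_examples_per_sec(batch_size)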
class BertBenchmarkBase(PerfZeroBenchmark):
"""Base class to hold methods common to test classes."""
local_flags = None
def __init__(self, output_dir=None):
super(BertBenchmarkBase, self).__init__(output_dir=output_dir)
self.num_gpus = 8
self.timer_callback = None
def _setup(self):
"""Sets up and resets flags before each test."""
super(BertBenchmarkBase, self)._setup()
self.timer_callback = BenchmarkTimerCallback()
def _report_benchmark(self, stats, wall_time_sec, min_accuracy, max_accuracy):
"""Report benchmark results by writing to local protobuf file.
Args:
stats: dict returned from BERT models with known entries.
wall_time_sec: the duration of the benchmark execution in seconds
min_accuracy: Minimum classification accuracy constraint to verify
correctness of the model.
max_accuracy: Maximum classification accuracy constraint to verify
correctness of the model.
"""
metrics = [{
'name': 'training_loss',
'value': stats['train_loss'],
}]
if self.timer_callback:
metrics.append({
'name':
'exp_per_second',
'value':
self.timer_callback.get_examples_per_sec(FLAGS.train_batch_size *
FLAGS.steps_per_loop)
})
else:
metrics.append({
'name': 'exp_per_second',
'value': 0.0,
})
if self.timer_callback and 'start_time_sec' in stats:
metrics.append({
'name': 'startup_time',
'value': self.timer_callback.get_startup_time(stats['start_time_sec'])
})
if 'eval_metrics' in stats:
metrics.append({
'name': 'eval_accuracy',
'value': stats['eval_metrics'],
'min_value': min_accuracy,
'max_value': max_accuracy,
})
flags_str = flags_core.get_nondefault_flags_as_str()
self.report_benchmark(
iters=stats['total_training_steps'],
wall_time=wall_time_sec,
metrics=metrics,
extras={'flags': flags_str})
| apache-2.0 |
ojake/django | tests/prefetch_related/test_uuid.py | 291 | 4347 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Flea, House, Person, Pet, Room
class UUIDPrefetchRelated(TestCase):
def test_prefetch_related_from_uuid_model(self):
Pet.objects.create(name='Fifi').people.add(
Person.objects.create(name='Ellen'),
Person.objects.create(name='George'),
)
with self.assertNumQueries(2):
pet = Pet.objects.prefetch_related('people').get(name='Fifi')
with self.assertNumQueries(0):
self.assertEqual(2, len(pet.people.all()))
def test_prefetch_related_to_uuid_model(self):
Person.objects.create(name='Bella').pets.add(
Pet.objects.create(name='Socks'),
Pet.objects.create(name='Coffee'),
)
with self.assertNumQueries(2):
person = Person.objects.prefetch_related('pets').get(name='Bella')
with self.assertNumQueries(0):
self.assertEqual(2, len(person.pets.all()))
def test_prefetch_related_from_uuid_model_to_uuid_model(self):
fleas = [Flea.objects.create() for i in range(3)]
Pet.objects.create(name='Fifi').fleas_hosted.add(*fleas)
Pet.objects.create(name='Bobo').fleas_hosted.add(*fleas)
with self.assertNumQueries(2):
pet = Pet.objects.prefetch_related('fleas_hosted').get(name='Fifi')
with self.assertNumQueries(0):
self.assertEqual(3, len(pet.fleas_hosted.all()))
with self.assertNumQueries(2):
flea = Flea.objects.prefetch_related('pets_visited').get(pk=fleas[0].pk)
with self.assertNumQueries(0):
self.assertEqual(2, len(flea.pets_visited.all()))
class UUIDPrefetchRelatedLookups(TestCase):
@classmethod
def setUpTestData(cls):
house = House.objects.create(name='Redwood', address='Arcata')
room = Room.objects.create(name='Racoon', house=house)
fleas = [Flea.objects.create(current_room=room) for i in range(3)]
pet = Pet.objects.create(name='Spooky')
pet.fleas_hosted.add(*fleas)
person = Person.objects.create(name='Bob')
person.houses.add(house)
person.pets.add(pet)
person.fleas_hosted.add(*fleas)
def test_from_uuid_pk_lookup_uuid_pk_integer_pk(self):
# From uuid-pk model, prefetch <uuid-pk model>.<integer-pk model>:
with self.assertNumQueries(4):
spooky = Pet.objects.prefetch_related('fleas_hosted__current_room__house').get(name='Spooky')
with self.assertNumQueries(0):
self.assertEqual('Racoon', spooky.fleas_hosted.all()[0].current_room.name)
def test_from_uuid_pk_lookup_integer_pk2_uuid_pk2(self):
# From uuid-pk model, prefetch <integer-pk model>.<integer-pk model>.<uuid-pk model>.<uuid-pk model>:
with self.assertNumQueries(5):
spooky = Pet.objects.prefetch_related('people__houses__rooms__fleas').get(name='Spooky')
with self.assertNumQueries(0):
self.assertEqual(3, len(spooky.people.all()[0].houses.all()[0].rooms.all()[0].fleas.all()))
def test_from_integer_pk_lookup_uuid_pk_integer_pk(self):
# From integer-pk model, prefetch <uuid-pk model>.<integer-pk model>:
with self.assertNumQueries(3):
racoon = Room.objects.prefetch_related('fleas__people_visited').get(name='Racoon')
with self.assertNumQueries(0):
self.assertEqual('Bob', racoon.fleas.all()[0].people_visited.all()[0].name)
def test_from_integer_pk_lookup_integer_pk_uuid_pk(self):
# From integer-pk model, prefetch <integer-pk model>.<uuid-pk model>:
with self.assertNumQueries(3):
redwood = House.objects.prefetch_related('rooms__fleas').get(name='Redwood')
with self.assertNumQueries(0):
self.assertEqual(3, len(redwood.rooms.all()[0].fleas.all()))
def test_from_integer_pk_lookup_integer_pk_uuid_pk_uuid_pk(self):
# From integer-pk model, prefetch <integer-pk model>.<uuid-pk model>.<uuid-pk model>:
with self.assertNumQueries(4):
redwood = House.objects.prefetch_related('rooms__fleas__pets_visited').get(name='Redwood')
with self.assertNumQueries(0):
self.assertEqual('Spooky', redwood.rooms.all()[0].fleas.all()[0].pets_visited.all()[0].name)
| bsd-3-clause |
mozilla/verbatim | vendor/lib/python/django/utils/ipv6.py | 99 | 7871 | # This code was mostly based on ipaddr-py
# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
# Licensed under the Apache License, Version 2.0 (the "License").
from django.core.exceptions import ValidationError
def clean_ipv6_address(ip_str, unpack_ipv4=False,
error_message="This is not a valid IPv6 address"):
"""
Cleans an IPv6 address string.
Validity is checked by calling is_valid_ipv6_address() - if an
invalid address is passed, ValidationError is raised.
Replaces the longest continuous zero-sequence with "::" and
removes leading zeroes and makes sure all hextets are lowercase.
Args:
ip_str: A valid IPv6 address.
unpack_ipv4: if an IPv4-mapped address is found,
return the plain IPv4 address (default=False).
error_message: An error message used in the ValidationError.
Returns:
A compressed IPv6 address, or the same value
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
if not is_valid_ipv6_address(ip_str):
raise ValidationError(error_message)
# This algorithm can only handle fully exploded
# IP strings
ip_str = _explode_shorthand_ip_string(ip_str)
ip_str = _sanitize_ipv4_mapping(ip_str)
# If needed, unpack the IPv4 and return straight away
# - no need in running the rest of the algorithm
if unpack_ipv4:
ipv4_unpacked = _unpack_ipv4(ip_str)
if ipv4_unpacked:
return ipv4_unpacked
hextets = ip_str.split(":")
for index in range(len(hextets)):
# Remove leading zeroes
hextets[index] = hextets[index].lstrip('0')
if not hextets[index]:
hextets[index] = '0'
# Determine best hextet to compress
if hextets[index] == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
# Compress the most suitable hextet
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
result = ":".join(hextets)
return result.lower()
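# Illustrative results (derived from the algorithm above):
#   clean_ipv6_address('2001:0db8:0000:0000:0000:0000:0000:0001') -> '2001:db8::1'
#   clean_ipv6_address('::ffff:0a0a:0a0a', unpack_ipv4=True) -> '10.10.10.10'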
def _sanitize_ipv4_mapping(ip_str):
"""
Sanitize IPv4 mapping in an expanded IPv6 address.
This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
If there is nothing to sanitize, returns an unchanged
string.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The sanitized output string, if applicable.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
# not an ipv4 mapping
return ip_str
hextets = ip_str.split(':')
if '.' in hextets[-1]:
# already sanitized
return ip_str
ipv4_address = "%d.%d.%d.%d" % (
int(hextets[6][0:2], 16),
int(hextets[6][2:4], 16),
int(hextets[7][0:2], 16),
int(hextets[7][2:4], 16),
)
result = ':'.join(hextets[0:6])
result += ':' + ipv4_address
return result
def _unpack_ipv4(ip_str):
"""
Unpack an IPv4 address that was mapped in a compressed IPv6 address.
This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
If there is nothing to unpack, returns None.
Args:
ip_str: A string, the expanded IPv6 address.
Returns:
The unpacked IPv4 address, or None if there was nothing to unpack.
"""
if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
return None
hextets = ip_str.split(':')
return hextets[-1]
def is_valid_ipv6_address(ip_str):
"""
Ensure we have a valid IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if this is a valid IPv6 address.
"""
from django.core.validators import validate_ipv4_address
# We need to have at least one ':'.
if ':' not in ip_str:
return False
# We can only have one '::' shortener.
if ip_str.count('::') > 1:
return False
# '::' should be encompassed by start, digits or end.
if ':::' in ip_str:
return False
# A single colon can neither start nor end an address.
if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
(ip_str.endswith(':') and not ip_str.endswith('::'))):
return False
# We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
if ip_str.count(':') > 7:
return False
# If we have no concatenation, we need to have 8 fields with 7 ':'.
if '::' not in ip_str and ip_str.count(':') != 7:
# We might have an IPv4 mapped address.
if ip_str.count('.') != 3:
return False
ip_str = _explode_shorthand_ip_string(ip_str)
# Now that we have that all squared away, let's check that each of the
# hextets are between 0x0 and 0xFFFF.
for hextet in ip_str.split(':'):
if hextet.count('.') == 3:
# If we have an IPv4 mapped address, the IPv4 portion has to
# be at the end of the IPv6 portion.
if not ip_str.split(':')[-1] == hextet:
return False
try:
validate_ipv4_address(hextet)
except ValidationError:
return False
else:
try:
# a value error here means that we got a bad hextet,
# something like 0xzzzz
if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
return False
except ValueError:
return False
return True
def _explode_shorthand_ip_string(ip_str):
"""
Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if not _is_shorthand_ip(ip_str):
# We've already got a longhand ip_str.
return ip_str
new_ip = []
hextet = ip_str.split('::')
# If there is a ::, we need to expand it with zeroes
# to get to 8 hextets - unless there is a dot in the last hextet,
# meaning we're doing v4-mapping
if '.' in ip_str.split(':')[-1]:
fill_to = 7
else:
fill_to = 8
if len(hextet) > 1:
sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
new_ip = hextet[0].split(':')
for _ in xrange(fill_to - sep):
new_ip.append('0000')
new_ip += hextet[1].split(':')
else:
new_ip = ip_str.split(':')
# Now need to make sure every hextet is 4 lower case characters.
# If a hextet is < 4 characters, we've got missing leading 0's.
ret_ip = []
for hextet in new_ip:
ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
return ':'.join(ret_ip)
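# Illustrative expansion: _explode_shorthand_ip_string('2001:db8::1')
#   -> '2001:0db8:0000:0000:0000:0000:0000:0001'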
def _is_shorthand_ip(ip_str):
"""Determine if the address is shortened.
Args:
ip_str: A string, the IPv6 address.
Returns:
A boolean, True if the address is shortened.
"""
if ip_str.count('::') == 1:
return True
if filter(lambda x: len(x) < 4, ip_str.split(':')):
return True
return False
| gpl-2.0 |
BioroboticsLab/diktya | tests/test_func_api_helpers.py | 1 | 3885 | # Copyright 2015 Leon Sixt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from diktya.func_api_helpers import sequential, concat, save_model, load_model, \
get_hdf5_attr
import h5py
from keras.layers.core import Dense
from keras.engine.topology import Input
from keras.engine.training import collect_trainable_weights
from keras.models import Model
import keras.backend as K
def test_concat():
x = Input(shape=(20,))
y = Input(shape=(20,))
model = Model([x, y], concat([x, y]))
in_x = np.random.sample((32, 20))
in_y = np.random.sample((32, 20))
model.compile('adam', 'mse')
assert model.predict_on_batch([in_x, in_y]).shape == (32, 40)
def test_sequential():
x = Input(shape=(20,))
seq = sequential([
Dense(20),
Dense(10),
Dense(1),
])
out = seq(x)
model = Model([x], [out])
model.compile('adam', 'mse')
model.predict_on_batch(np.random.sample((64, 20)))
def test_sequential_flatten():
x = Input(shape=(20,))
seq = sequential([
Dense(20),
[
Dense(10)
],
[
[
Dense(1)
],
Dense(1)
]
])
out = seq(x)
model = Model([x], [out])
model.compile('adam', 'mse')
model.predict_on_batch(np.random.sample((64, 20)))
def test_sequential_trainable():
x = Input(shape=(20,))
dense1 = Dense(20)
dense2 = Dense(10)
dense3 = Dense(1)
seq = sequential([
dense1,
dense2,
dense3,
], trainable=False)
seq(x)
assert collect_trainable_weights(dense1) == []
assert collect_trainable_weights(dense2) == []
assert collect_trainable_weights(dense3) == []
def test_sequential_namespace():
x = Input(shape=(20,))
dense1 = Dense(20)
dense2 = Dense(10)
dense3 = Dense(1)
seq = sequential([
dense1,
dense2,
dense3,
], ns='hello')
seq(x)
assert dense1.name.startswith('hello.')
assert dense2.name.startswith('hello.')
assert dense3.name.startswith('hello.')
def test_sequential_enumerate():
x = Input(shape=(20,))
dense1 = Dense(20)
dense2 = Dense(10)
dense3 = Dense(1)
seq = sequential([
dense1,
dense2,
dense3,
], ns='hello')
seq(x)
assert dense1.name.endswith('hello.00_dense')
assert dense2.name.endswith('hello.01_dense')
assert dense3.name.endswith('hello.02_dense')
def test_save_model(tmpdir):
x = Input(shape=(20,))
y = sequential([
Dense(15, name='hey'),
Dense(10, name='hoo'),
])(x)
m = Model(x, y)
fname = str(tmpdir.join("weights.hdf5"))
save_model(m, fname)
f = h5py.File(fname, 'r')
print(list(f.attrs.keys()))
f.close()
m_load = load_model(fname)
assert [l.name for l in m.layers] == [l.name for l in m_load.layers]
for l, l_load in zip(m.layers, m_load.layers):
for w, w_load in zip(l.trainable_weights, l_load.trainable_weights):
assert (K.get_value(w) == K.get_value(w_load)).all()
def test_get_hdf5_attr(tmpdir):
fname = str(tmpdir.join("test.hdf5"))
with h5py.File(fname) as f:
f.attrs['name'] = "Ada Lovelance"
assert get_hdf5_attr(fname, 'name') == "Ada Lovelance"
assert get_hdf5_attr(fname, 'not_present', default="default") == "default"
| apache-2.0 |
landism/pants | src/python/pants/util/desktop.py | 8 | 1485 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import subprocess
from pants.util.osutil import get_os_name
class OpenError(Exception):
"""Indicates an error opening a file in a desktop application."""
def _mac_open(files):
subprocess.call(['open'] + list(files))
def _linux_open(files):
cmd = "xdg-open"
if not _cmd_exists(cmd):
raise OpenError("The program '{}' isn't in your PATH. Please install and re-run this "
"goal.".format(cmd))
for f in list(files):
subprocess.call([cmd, f])
# From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def _cmd_exists(cmd):
return subprocess.call(["/usr/bin/which", cmd], shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
_OPENER_BY_OS = {
'darwin': _mac_open,
'linux': _linux_open
}
def ui_open(*files):
"""Attempts to open the given files using the preferred desktop viewer or editor.
:raises :class:`OpenError`: if there is a problem opening any of the files.
"""
if files:
osname = get_os_name()
opener = _OPENER_BY_OS.get(osname)
if opener:
opener(files)
else:
raise OpenError('Open currently not supported for ' + osname)
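# A minimal usage sketch (not part of the original module); the file paths are
# hypothetical, and OpenError is handled as the docstring above suggests.
def _demo_ui_open():
  try:
    ui_open('/tmp/report.html', '/tmp/coverage.html')
  except OpenError as e:
    print('Unable to open files: {0}'.format(e))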
| apache-2.0 |
ScottWales/rose | lib/python/rose/config_editor/variable.py | 1 | 25435 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-5 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
#-----------------------------------------------------------------------------
import copy
import difflib
import inspect
import os
import re
import sys
import webbrowser
import pango
import pygtk
pygtk.require('2.0')
import gtk
import rose.config_editor.keywidget
import rose.config_editor.menuwidget
import rose.config_editor.valuewidget
import rose.config_editor.valuewidget.array.row as row
import rose.config_editor.valuewidget.source
import rose.config_editor.util
import rose.gtk.dialog
import rose.gtk.util
import rose.reporter
import rose.resource
class VariableWidget(object):
"""This class generates a set of widgets representing the variable.
The set of widgets generated depends on the variable metadata, if any.
Altering values using the widgets will alter the variable object as part
of the internal data model.
"""
def __init__(self, variable, var_ops, is_ghost=False, show_modes=None,
hide_keywidget_subtext=False):
self.variable = variable
self.key = variable.name
self.value = variable.value
self.meta = variable.metadata
self.is_ghost = is_ghost
self.var_ops = var_ops
if show_modes is None:
show_modes = {}
self.show_modes = show_modes
self.insensitive_colour = gtk.Style().bg[0]
self.bad_colour = rose.gtk.util.color_parse(
rose.config_editor.COLOUR_VARIABLE_TEXT_ERROR)
self.hidden_colour = rose.gtk.util.color_parse(
rose.config_editor.COLOUR_VARIABLE_TEXT_IRRELEVANT)
self.keywidget = self.get_keywidget(variable, show_modes)
self.generate_valuewidget(variable)
self.is_inconsistent = False
if 'type' in variable.error:
self._set_inconsistent(self.valuewidget, variable)
self.errors = variable.error.keys()
self.menuwidget = self.get_menuwidget(variable)
self.generate_labelwidget()
self.generate_contentwidget()
self.yoptions = gtk.FILL
self.force_signal_ids = []
self.is_modified = False
for child_widget in self.get_children():
setattr(child_widget, 'get_parent', lambda: self)
self.trigger_ignored = lambda v, b: b
self.get_parent = lambda: None
self.is_ignored = False
self.set_ignored()
self.update_status()
def get_keywidget(self, variable, show_modes):
"""Creates the keywidget attribute, based on the variable name.
Loads 'tooltips' or hover-over text based on the variable metadata.
"""
widget = rose.config_editor.keywidget.KeyWidget(
variable, self.var_ops, self.launch_help, self.update_status,
show_modes
)
widget.show()
return widget
def generate_labelwidget(self):
"""Creates the label widget, a composite of key and menu widgets."""
self.labelwidget = gtk.VBox()
self.labelwidget.show()
self.labelwidget.set_ignored = self.keywidget.set_ignored
menu_offset = self.menuwidget.size_request()[1] / 2
key_offset = self.keywidget.get_centre_height() / 2
menu_vbox = gtk.VBox()
menu_vbox.pack_start(self.menuwidget, expand=False, fill=False,
padding=max([(key_offset - menu_offset), 0]))
menu_vbox.show()
key_vbox = gtk.VBox()
key_vbox.pack_start(self.keywidget, expand=False, fill=False,
padding=max([(menu_offset - key_offset) / 2, 0]))
key_vbox.show()
label_content_hbox = gtk.HBox()
label_content_hbox.pack_start(menu_vbox, expand=False, fill=False)
label_content_hbox.pack_start(key_vbox, expand=False, fill=False)
label_content_hbox.show()
event_box = gtk.EventBox()
event_box.show()
self.labelwidget.pack_start(label_content_hbox, expand=True, fill=True)
self.labelwidget.pack_start(event_box, expand=True, fill=True)
def generate_contentwidget(self):
"""Create the content widget, a vbox-packed valuewidget."""
self.contentwidget = gtk.VBox()
self.contentwidget.show()
content_event_box = gtk.EventBox()
content_event_box.show()
self.contentwidget.pack_start(self.valuewidget, expand=False, fill=False)
self.contentwidget.pack_start(content_event_box, expand=True, fill=True)
def _valuewidget_set_value(self, value):
# This is called by a valuewidget to change the variable value.
self.var_ops.set_var_value(self.variable, value)
self.update_status()
def generate_valuewidget(self, variable, override_custom=False,
use_this_valuewidget=None):
"""Creates the valuewidget attribute, based on value and metadata."""
custom_arg = None
if variable.metadata.get("type") == rose.config_editor.FILE_TYPE_NORMAL:
use_this_valuewidget = (rose.config_editor.
valuewidget.source.SourceValueWidget)
custom_arg = self.var_ops
set_value = self._valuewidget_set_value
hook_object = rose.config_editor.valuewidget.ValueWidgetHook(
rose.config_editor.false_function,
self._get_focus)
metadata = copy.deepcopy(variable.metadata)
if use_this_valuewidget is not None:
self.valuewidget = use_this_valuewidget(variable.value,
metadata,
set_value,
hook_object,
custom_arg)
elif (rose.config_editor.META_PROP_WIDGET in self.meta and
not override_custom):
w_val = self.meta[rose.config_editor.META_PROP_WIDGET]
info = w_val.split(None, 1)
if len(info) > 1:
widget_path, custom_arg = info
else:
widget_path, custom_arg = info[0], None
files = self.var_ops.get_ns_metadata_files(metadata["full_ns"])
lib = os.path.join("lib", "python", "widget")
widget_fpath = os.path.join(lib, *widget_path.split(".")[:-1])
error_handler = lambda e: self.handle_bad_valuewidget(
str(e), variable, set_value)
widget = rose.resource.import_object(widget_path,
files,
error_handler)
if widget is None:
text = rose.config_editor.ERROR_IMPORT_CLASS.format(
w_val)
self.handle_bad_valuewidget(text, variable, set_value)
try:
self.valuewidget = widget(variable.value,
metadata,
set_value,
hook_object,
custom_arg)
except Exception as e:
self.handle_bad_valuewidget(str(e), variable,
set_value)
else:
widget_maker = rose.config_editor.valuewidget.chooser(
variable.value, variable.metadata,
variable.error)
self.valuewidget = widget_maker(variable.value,
metadata, set_value,
hook_object, custom_arg)
for child in self.valuewidget.get_children():
child.connect('focus-in-event', self.handle_focus_in)
child.connect('focus-out-event', self.handle_focus_out)
if hasattr(child, 'get_children'):
for grandchild in child.get_children():
grandchild.connect('focus-in-event',
self.handle_focus_in)
grandchild.connect('focus-out-event',
self.handle_focus_out)
self.valuewidget.show()
def handle_bad_valuewidget(self, error_info, variable, set_value):
"""Handle a bad custom valuewidget import."""
text = rose.config_editor.ERROR_IMPORT_WIDGET.format(error_info)
rose.reporter.Reporter()(
rose.config_editor.util.ImportWidgetError(text))
self.generate_valuewidget(variable, override_custom=True)
def handle_focus_in(self, widget, event):
widget._first_colour = widget.style.base[gtk.STATE_NORMAL]
new_colour = rose.gtk.util.color_parse(
rose.config_editor.COLOUR_VALUEWIDGET_BASE_SELECTED)
widget.modify_base(gtk.STATE_NORMAL, new_colour)
def handle_focus_out(self, widget, event):
if hasattr(widget, "_first_colour"):
widget.modify_base(gtk.STATE_NORMAL, widget._first_colour)
def get_menuwidget(self, variable, menuclass=None):
"""Create the menuwidget attribute, an option menu button."""
if menuclass is None:
menuclass = rose.config_editor.menuwidget.MenuWidget
menuwidget = menuclass(variable,
self.var_ops,
lambda: self.remove_from(self.get_parent()),
self.update_status,
self.launch_help)
menuwidget.show()
return menuwidget
def insert_into(self, container, x_info=None, y_info=None,
no_menuwidget=False):
"""Inserts the child widgets of an instance into the 'container'.
As PyGTK is not that introspective, we need arguments specifying where
the correct area within the widget is - in the case of gtk.Table
instances, we need the number of columns and the row index.
These arguments are generically named x_info and y_info.
"""
if not hasattr(container, 'num_removes'):
setattr(container, 'num_removes', 0)
if isinstance(container, gtk.Table):
row_index = y_info
key_col = 0
container.attach(self.labelwidget,
key_col, key_col + 1,
row_index, row_index + 1,
xoptions=gtk.FILL,
yoptions=gtk.FILL)
container.attach(self.contentwidget,
key_col + 1, key_col + 2,
row_index, row_index + 1,
xpadding=5,
xoptions=gtk.EXPAND|gtk.FILL,
yoptions=self.yoptions)
self.valuewidget.trigger_scroll = (
lambda b, e: self.force_scroll(b, container))
setattr(self, 'get_parent', lambda : container)
elif isinstance(container, gtk.VBox):
container.pack_start(self.labelwidget, expand=False, fill=True,
padding=5)
container.pack_start(self.contentwidget, expand=True, fill=True,
padding=10)
self.valuewidget.trigger_scroll = (
lambda b, e: self.force_scroll(b, container))
setattr(self, 'get_parent', lambda : container)
return container
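    # A minimal sketch of how insert_into is typically driven (not part of the
    # original class); "num_vars" and "row_index" are hypothetical caller
    # values supplied by the page that owns the gtk.Table:
    #
    #   table = gtk.Table(rows=num_vars, columns=2)
    #   widget = VariableWidget(variable, var_ops)
    #   widget.insert_into(table, y_info=row_index)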
def force_scroll(self, widget=None, container=None):
"""Adjusts a scrolled window to display the correct widget."""
y_coordinate = None
if widget is not None:
y_coordinate = widget.get_allocation().y
scroll_container = container.get_parent()
if scroll_container is None:
return False
while not isinstance(scroll_container, gtk.ScrolledWindow):
scroll_container = scroll_container.get_parent()
vadj = scroll_container.get_vadjustment()
if vadj.upper == 1.0 or y_coordinate == -1:
if not self.force_signal_ids:
self.force_signal_ids.append(
vadj.connect_after(
'changed',
lambda a: self.force_scroll(widget, container)))
else:
for handler_id in self.force_signal_ids:
vadj.handler_block(handler_id)
self.force_signal_ids = []
vadj.connect('changed', rose.config_editor.false_function)
if y_coordinate is None:
vadj.upper = vadj.upper + 0.08 * vadj.page_size
vadj.set_value(vadj.upper - vadj.page_size)
return False
if y_coordinate == -1: # Bad allocation, don't scroll
return False
if not vadj.value < y_coordinate < vadj.value + 0.95 * vadj.page_size:
vadj.set_value(min(y_coordinate, vadj.upper - vadj.page_size))
return False
def remove_from(self, container):
"""Removes the child widgets of an instance from the 'container'."""
container.num_removes += 1
self.var_ops.remove_var(self.variable)
if isinstance(container, gtk.Table):
for widget_child in self.get_children():
for child in container.get_children():
if child == widget_child:
container.remove(widget_child)
widget_child.destroy()
return container
def get_children(self):
"""Method that returns child widgets - as in some gtk Objects."""
return [self.labelwidget, self.contentwidget]
def hide(self):
for widget in self.get_children():
widget.hide()
def show(self):
for widget in self.get_children():
widget.show()
def set_show_mode(self, show_mode, should_show_mode):
"""Sets or unsets special displays for a variable."""
self.keywidget.set_show_mode(show_mode, should_show_mode)
def set_ignored(self):
"""Sets or unsets a custom ignored state for the widgets."""
ign_map = self.variable.ignored_reason
self.keywidget.set_ignored()
if ign_map != {}:
# Technically ignored, but could just be ignored by section.
self.is_ignored = True
if "'Ignore'" not in self.menuwidget.option_ui:
self.menuwidget.old_option_ui = self.menuwidget.option_ui
self.menuwidget.old_actions = self.menuwidget.actions
if ign_map.keys() == [rose.variable.IGNORED_BY_SECTION]:
# Not ignored in itself, so give Ignore option.
if "'Enable'" in self.menuwidget.option_ui:
self.menuwidget.option_ui = re.sub(
"<menuitem action='Enable'/>",
r"<menuitem action='Ignore'/>",
self.menuwidget.option_ui)
else:
# Ignored in itself, so needs Enable option.
self.menuwidget.option_ui = re.sub(
"<menuitem action='Ignore'/>",
r"<menuitem action='Enable'/>",
self.menuwidget.option_ui)
self.update_status()
self.set_sensitive(False)
else:
# Enabled.
self.is_ignored = False
if "'Enable'" in self.menuwidget.option_ui:
self.menuwidget.option_ui = re.sub(
"<menuitem action='Enable'/>",
r"<menuitem action='Ignore'/>",
self.menuwidget.option_ui)
self.update_status()
if not self.is_ghost:
self.set_sensitive(True)
def update_status(self):
"""Handles variable modified status."""
self.set_modified(self.var_ops.is_var_modified(self.variable))
self.keywidget.update_comment_display()
def set_modified(self, is_modified=True):
"""Applies or unsets a custom 'modified' state for the widgets."""
if is_modified == self.is_modified:
return False
self.is_modified = is_modified
self.keywidget.set_modified(is_modified)
if not is_modified and isinstance(self.keywidget.entry, gtk.Entry):
# This variable should now be displayed as a normal variable.
self.valuewidget.trigger_refresh(self.variable.metadata['id'])
def set_sensitive(self, is_sensitive=True):
"""Sets whether the widgets are grayed-out or 'insensitive'."""
for widget in [self.keywidget, self.valuewidget]:
widget.set_sensitive(is_sensitive)
return False
def grab_focus(self, focus_container=None, scroll_bottom=False,
index=None):
"""Method similar to gtk.Widget - get the keyboard focus."""
if hasattr(self, 'valuewidget'):
self.valuewidget.grab_focus()
if (index is not None and
hasattr(self.valuewidget, 'set_focus_index')):
self.valuewidget.set_focus_index(index)
for child in self.valuewidget.get_children():
if (gtk.SENSITIVE & child.flags() and
gtk.PARENT_SENSITIVE & child.flags()):
break
else:
if hasattr(self, 'menuwidget'):
self.menuwidget.get_children()[0].grab_focus()
if scroll_bottom and focus_container is not None:
self.force_scroll(None, container=focus_container)
if hasattr(self, 'keywidget') and self.key == '':
self.keywidget.grab_focus()
return False
def get_focus_index(self):
"""Get the current cursor position in the variable value string."""
if (hasattr(self, "valuewidget") and
hasattr(self.valuewidget, "get_focus_index")):
return self.valuewidget.get_focus_index()
diff = difflib.SequenceMatcher(None,
self.variable.old_value,
self.variable.value)
        # Return all end-of-block indices for changed blocks.
        indices = [x[4] for x in diff.get_opcodes() if x[0] != 'equal']
        if not indices:
            return None
        return indices[-1]
def launch_help(self, text_or_url=None):
if text_or_url is None:
if rose.META_PROP_HELP in self.meta:
text_or_url = None
if self.show_modes.get(
rose.config_editor.SHOW_MODE_CUSTOM_HELP):
format_string = rose.config_editor.CUSTOM_FORMAT_HELP
text_or_url = rose.variable.expand_format_string(
format_string,
self.variable)
if text_or_url is None:
text_or_url = self.meta[rose.META_PROP_HELP]
elif 'url' in self.meta:
text_or_url = self.meta[rose.META_PROP_URL]
else:
return
if (text_or_url.startswith('http://') or
text_or_url.startswith('www.')):
webbrowser.open(text_or_url)
else:
self._launch_help_dialog(text_or_url)
def _launch_help_dialog(self, text, url=None):
title = rose.config_editor.DIALOG_HELP_TITLE.format(
self.variable.metadata['id'])
ns = self.variable.metadata["full_ns"]
search_function = lambda i: self.var_ops.search_for_var(ns, i)
rose.gtk.dialog.run_hyperlink_dialog(gtk.STOCK_DIALOG_INFO,
text, title, search_function)
return False
def _set_inconsistent(self, valuewidget, variable):
valuewidget.modify_base(gtk.STATE_NORMAL, self.bad_colour)
self.is_inconsistent = True
widget_list = valuewidget.get_children()
while widget_list:
widget = widget_list.pop()
widget.modify_text(gtk.STATE_NORMAL, self.bad_colour)
if hasattr(widget, 'set_inconsistent'):
widget.set_inconsistent(True)
if isinstance(widget, gtk.RadioButton):
widget.set_active(False)
if (hasattr(widget, 'get_group') and
hasattr(widget.get_group(), 'set_inconsistent')):
widget.get_group().set_inconsistent(True)
if isinstance(widget, gtk.Entry):
widget.modify_fg(gtk.STATE_NORMAL, self.bad_colour)
if isinstance(widget, gtk.SpinButton):
try:
v_value = float(variable.value)
w_value = float(widget.get_value())
except (TypeError, ValueError):
widget.modify_text(gtk.STATE_NORMAL, self.hidden_colour)
else:
if w_value != v_value:
widget.modify_text(gtk.STATE_NORMAL,
self.hidden_colour)
if hasattr(widget, 'get_children'):
widget_list.extend(widget.get_children())
elif hasattr(widget, 'get_child'):
widget_list.append(widget.get_child())
def _set_consistent(self, valuewidget, variable):
normal_style = gtk.Style()
normal_base = normal_style.base[gtk.STATE_NORMAL]
normal_fg = normal_style.fg[gtk.STATE_NORMAL]
normal_text = normal_style.text[gtk.STATE_NORMAL]
valuewidget.modify_base(gtk.STATE_NORMAL, normal_base)
        self.is_inconsistent = False
for widget in valuewidget.get_children():
widget.modify_text(gtk.STATE_NORMAL, normal_text)
if hasattr(widget, 'set_inconsistent'):
widget.set_inconsistent(False)
if isinstance(widget, gtk.Entry):
widget.modify_fg(gtk.STATE_NORMAL, normal_fg)
if (hasattr(widget, 'get_group') and
hasattr(widget.get_group(), 'set_inconsistent')):
widget.get_group().set_inconsistent(False)
def _get_focus(self, widget_for_focus):
widget_for_focus.grab_focus()
self.valuewidget.trigger_scroll(widget_for_focus, None)
if isinstance(widget_for_focus, gtk.Entry):
text_length = len(widget_for_focus.get_text())
if text_length > 0:
widget_for_focus.set_position(text_length)
widget_for_focus.select_region(text_length,
text_length)
return False
def needs_type_error_refresh(self):
"""Check if self needs to be re-created on 'type' error."""
if hasattr(self.valuewidget, "handle_type_error"):
return False
return True
def type_error_refresh(self, variable):
"""Handle a type error."""
if rose.META_PROP_TYPE in variable.error:
self._set_inconsistent(self.valuewidget, variable)
else:
self._set_consistent(self.valuewidget, variable)
self.variable = variable
self.errors = variable.error.keys()
self.valuewidget.handle_type_error(rose.META_PROP_TYPE in self.errors)
self.menuwidget.refresh(variable)
self.keywidget.refresh(variable)
class RowVariableWidget(VariableWidget):
"""This class generates a set of widgets for use as a row in a table."""
def __init__(self, *args, **kwargs):
self.length = kwargs.pop("length")
super(RowVariableWidget, self).__init__(*args, **kwargs)
def generate_valuewidget(self, variable, override_custom=False):
"""Creates the valuewidget attribute, based on value and metadata."""
if (rose.META_PROP_LENGTH in variable.metadata or
isinstance(variable.metadata.get(rose.META_PROP_TYPE), list)):
use_this_valuewidget = self.make_row_valuewidget
else:
use_this_valuewidget = None
super(RowVariableWidget, self).generate_valuewidget(
variable, override_custom=override_custom,
use_this_valuewidget=use_this_valuewidget)
def make_row_valuewidget(self, *args, **kwargs):
kwargs.update({"arg_str": str(self.length)})
return row.RowArrayValueWidget(*args, **kwargs)
| gpl-3.0 |
CiscoSystems/tempest | tempest/scenario/test_security_groups_basic_ops.py | 3 | 20917 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
"""
This test suite assumes that Nova has been configured to
boot VM's with Neutron-managed networking, and attempts to
verify cross tenant connectivity as follows
ssh:
in order to overcome "ip namespace", each tenant has an "access point"
VM with floating-ip open to incoming ssh connection allowing network
commands (ping/ssh) to be executed from within the
tenant-network-namespace
Tempest host performs key-based authentication to the ssh server via
floating IP address
connectivity test is done by pinging destination server via source server
ssh connection.
success - ping returns
failure - ping_timeout reached
setup:
for primary tenant:
1. create a network&subnet
2. create a router (if public router isn't configured)
3. connect tenant network to public network via router
4. create an access point:
a. a security group open to incoming ssh connection
b. a VM with a floating ip
5. create a general empty security group (same as "default", but
without rules allowing in-tenant traffic)
tests:
1. _verify_network_details
2. _verify_mac_addr: for each access point verify that
(subnet, fix_ip, mac address) are as defined in the port list
3. _test_in_tenant_block: test that in-tenant traffic is disabled
without rules allowing it
4. _test_in_tenant_allow: test that in-tenant traffic is enabled
once an appropriate rule has been created
5. _test_cross_tenant_block: test that cross-tenant traffic is disabled
without a rule allowing it on destination tenant
6. _test_cross_tenant_allow:
* test that cross-tenant traffic is enabled once an appropriate
rule has been created on destination tenant.
* test that reverse traffic is still blocked
* test than reverse traffic is enabled once an appropriate rule has
been created on source tenant
7._test_port_update_new_security_group:
* test that traffic is blocked with default security group
* test that traffic is enabled after updating port with new security
group having appropriate rule
assumptions:
1. alt_tenant/user existed and is different from primary_tenant/user
2. Public network is defined and reachable from the Tempest host
3. Public router can either be:
* defined, in which case all tenants networks can connect directly
to it, and cross tenant check will be done on the private IP of the
destination tenant
or
* not defined (empty string), in which case each tenant will have
its own router connected to the public network
"""
class TenantProperties():
"""
helper class to save tenant details
id
credentials
network
subnet
security groups
servers
access point
"""
def __init__(self, credentials):
self.manager = clients.Manager(credentials)
# Credentials from manager are filled with both names and IDs
self.creds = self.manager.credentials
self.network = None
self.subnet = None
self.router = None
self.security_groups = {}
self.servers = list()
def set_network(self, network, subnet, router):
self.network = network
self.subnet = subnet
self.router = router
@classmethod
def check_preconditions(cls):
if CONF.baremetal.driver_enabled:
msg = ('Not currently supported by baremetal.')
raise cls.skipException(msg)
super(TestSecurityGroupsBasicOps, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable or
CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
# Create no network resources for these tests.
cls.set_network_resources()
super(TestSecurityGroupsBasicOps, cls).resource_setup()
# TODO(mnewby) Consider looking up entities as needed instead
# of storing them as collections on the class.
# get credentials for secondary tenant
cls.alt_creds = cls.isolated_creds.get_alt_creds()
cls.alt_manager = clients.Manager(cls.alt_creds)
# Credentials from the manager are filled with both IDs and Names
cls.alt_creds = cls.alt_manager.credentials
cls.floating_ips = {}
cls.tenants = {}
creds = cls.credentials()
cls.primary_tenant = cls.TenantProperties(creds)
cls.alt_tenant = cls.TenantProperties(cls.alt_creds)
for tenant in [cls.primary_tenant, cls.alt_tenant]:
cls.tenants[tenant.creds.tenant_id] = tenant
cls.floating_ip_access = not CONF.network.public_router_id
def cleanup_wrapper(self, resource):
self.cleanup_resource(resource, self.__class__.__name__)
def setUp(self):
super(TestSecurityGroupsBasicOps, self).setUp()
self._deploy_tenant(self.primary_tenant)
self._verify_network_details(self.primary_tenant)
self._verify_mac_addr(self.primary_tenant)
def _create_tenant_keypairs(self, tenant):
keypair = self.create_keypair(tenant.manager.keypairs_client)
tenant.keypair = keypair
def _create_tenant_security_groups(self, tenant):
access_sg = self._create_empty_security_group(
namestart='secgroup_access-',
tenant_id=tenant.creds.tenant_id,
client=tenant.manager.network_client
)
# don't use default secgroup since it allows in-tenant traffic
def_sg = self._create_empty_security_group(
namestart='secgroup_general-',
tenant_id=tenant.creds.tenant_id,
client=tenant.manager.network_client
)
tenant.security_groups.update(access=access_sg, default=def_sg)
ssh_rule = dict(
protocol='tcp',
port_range_min=22,
port_range_max=22,
direction='ingress',
)
self._create_security_group_rule(secgroup=access_sg,
client=tenant.manager.network_client,
**ssh_rule)
def _verify_network_details(self, tenant):
# Checks that we see the newly created network/subnet/router via
# checking the result of list_[networks,routers,subnets]
# Check that (router, subnet) couple exist in port_list
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
self.assertIn(tenant.network.name, seen_names)
self.assertIn(tenant.network.id, seen_ids)
seen_subnets = [(n['id'], n['cidr'], n['network_id'])
for n in self._list_subnets()]
mysubnet = (tenant.subnet.id, tenant.subnet.cidr, tenant.network.id)
self.assertIn(mysubnet, seen_subnets)
seen_routers = self._list_routers()
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
self.assertIn(tenant.router.name, seen_router_names)
self.assertIn(tenant.router.id, seen_router_ids)
myport = (tenant.router.id, tenant.subnet.id)
router_ports = [(i['device_id'], i['fixed_ips'][0]['subnet_id']) for i
in self._list_ports()
if self._is_router_port(i)]
self.assertIn(myport, router_ports)
def _is_router_port(self, port):
"""Return True if port is a router interface."""
# NOTE(armando-migliaccio): match device owner for both centralized
# and distributed routers; 'device_owner' is "" by default.
return port['device_owner'].startswith('network:router_interface')
def _create_server(self, name, tenant, security_groups=None):
"""
creates a server and assigns to security group
"""
self._set_compute_context(tenant)
if security_groups is None:
security_groups = [tenant.security_groups['default']]
security_groups_names = [{'name': s['name']} for s in security_groups]
create_kwargs = {
'networks': [
{'uuid': tenant.network.id},
],
'key_name': tenant.keypair['name'],
'security_groups': security_groups_names,
'tenant_id': tenant.creds.tenant_id
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
self.assertEqual(
sorted([s['name'] for s in security_groups]),
sorted([s['name'] for s in server['security_groups']]))
return server
def _create_tenant_servers(self, tenant, num=1):
for i in range(num):
name = 'server-{tenant}-gen-{num}-'.format(
tenant=tenant.creds.tenant_name,
num=i
)
name = data_utils.rand_name(name)
server = self._create_server(name, tenant)
tenant.servers.append(server)
def _set_access_point(self, tenant):
"""
        creates a server in a secgroup with a rule allowing external ssh,
        in order to access the tenant's internal network
        (works around the ip namespace)
"""
secgroups = tenant.security_groups.values()
name = 'server-{tenant}-access_point-'.format(
tenant=tenant.creds.tenant_name)
name = data_utils.rand_name(name)
server = self._create_server(name, tenant,
security_groups=secgroups)
tenant.access_point = server
self._assign_floating_ips(tenant, server)
def _assign_floating_ips(self, tenant, server):
public_network_id = CONF.network.public_network_id
floating_ip = self.create_floating_ip(
server, public_network_id,
client=tenant.manager.network_client)
self.floating_ips.setdefault(server['id'], floating_ip)
def _create_tenant_network(self, tenant):
network, subnet, router = self.create_networks(
client=tenant.manager.network_client)
tenant.set_network(network, subnet, router)
def _set_compute_context(self, tenant):
self.servers_client = tenant.manager.servers_client
return self.servers_client
def _deploy_tenant(self, tenant_or_id):
"""
creates:
network
subnet
router (if public not defined)
access security group
access-point server
"""
if not isinstance(tenant_or_id, self.TenantProperties):
tenant = self.tenants[tenant_or_id]
else:
tenant = tenant_or_id
self._set_compute_context(tenant)
self._create_tenant_keypairs(tenant)
self._create_tenant_network(tenant)
self._create_tenant_security_groups(tenant)
self._set_access_point(tenant)
def _get_server_ip(self, server, floating=False):
"""
returns the ip (floating/internal) of a server
"""
if floating:
server_ip = self.floating_ips[server['id']].floating_ip_address
else:
server_ip = None
network_name = self.tenants[server['tenant_id']].network.name
if network_name in server['addresses']:
server_ip = server['addresses'][network_name][0]['addr']
return server_ip
def _connect_to_access_point(self, tenant):
"""
create ssh connection to tenant access point
"""
access_point_ssh = \
self.floating_ips[tenant.access_point['id']].floating_ip_address
private_key = tenant.keypair['private_key']
access_point_ssh = self._ssh_to_server(access_point_ssh,
private_key=private_key)
return access_point_ssh
def _check_connectivity(self, access_point, ip, should_succeed=True):
if should_succeed:
msg = "Timed out waiting for %s to become reachable" % ip
else:
msg = "%s is reachable" % ip
self.assertTrue(self._check_remote_connectivity(access_point, ip,
should_succeed), msg)
def _test_in_tenant_block(self, tenant):
access_point_ssh = self._connect_to_access_point(tenant)
for server in tenant.servers:
self._check_connectivity(access_point=access_point_ssh,
ip=self._get_server_ip(server),
should_succeed=False)
def _test_in_tenant_allow(self, tenant):
ruleset = dict(
protocol='icmp',
remote_group_id=tenant.security_groups['default'].id,
direction='ingress'
)
self._create_security_group_rule(
secgroup=tenant.security_groups['default'],
**ruleset
)
access_point_ssh = self._connect_to_access_point(tenant)
for server in tenant.servers:
self._check_connectivity(access_point=access_point_ssh,
ip=self._get_server_ip(server))
def _test_cross_tenant_block(self, source_tenant, dest_tenant):
"""
if public router isn't defined, then dest_tenant access is via
floating-ip
"""
access_point_ssh = self._connect_to_access_point(source_tenant)
ip = self._get_server_ip(dest_tenant.access_point,
floating=self.floating_ip_access)
self._check_connectivity(access_point=access_point_ssh, ip=ip,
should_succeed=False)
def _test_cross_tenant_allow(self, source_tenant, dest_tenant):
"""
check for each direction:
creating rule for tenant incoming traffic enables only 1way traffic
"""
ruleset = dict(
protocol='icmp',
direction='ingress'
)
self._create_security_group_rule(
secgroup=dest_tenant.security_groups['default'],
client=dest_tenant.manager.network_client,
**ruleset
)
access_point_ssh = self._connect_to_access_point(source_tenant)
ip = self._get_server_ip(dest_tenant.access_point,
floating=self.floating_ip_access)
self._check_connectivity(access_point_ssh, ip)
# test that reverse traffic is still blocked
self._test_cross_tenant_block(dest_tenant, source_tenant)
# allow reverse traffic and check
self._create_security_group_rule(
secgroup=source_tenant.security_groups['default'],
client=source_tenant.manager.network_client,
**ruleset
)
access_point_ssh_2 = self._connect_to_access_point(dest_tenant)
ip = self._get_server_ip(source_tenant.access_point,
floating=self.floating_ip_access)
self._check_connectivity(access_point_ssh_2, ip)
def _verify_mac_addr(self, tenant):
"""
        verify that the VM (tenant's access point) has the same IP and MAC
        address as listed in the port list
"""
access_point_ssh = self._connect_to_access_point(tenant)
mac_addr = access_point_ssh.get_mac_address()
mac_addr = mac_addr.strip().lower()
# Get the fixed_ips and mac_address fields of all ports. Select
# only those two columns to reduce the size of the response.
port_list = self._list_ports(fields=['fixed_ips', 'mac_address'])
port_detail_list = [
(port['fixed_ips'][0]['subnet_id'],
port['fixed_ips'][0]['ip_address'],
port['mac_address'].lower())
for port in port_list if port['fixed_ips']
]
server_ip = self._get_server_ip(tenant.access_point)
subnet_id = tenant.subnet.id
self.assertIn((subnet_id, server_ip, mac_addr), port_detail_list)
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_cross_tenant_traffic(self):
if not self.isolated_creds.is_multi_tenant():
raise self.skipException("No secondary tenant defined")
try:
# deploy new tenant
self._deploy_tenant(self.alt_tenant)
self._verify_network_details(self.alt_tenant)
self._verify_mac_addr(self.alt_tenant)
# cross tenant check
source_tenant = self.primary_tenant
dest_tenant = self.alt_tenant
self._test_cross_tenant_block(source_tenant, dest_tenant)
self._test_cross_tenant_allow(source_tenant, dest_tenant)
except Exception:
for tenant in self.tenants.values():
self._log_console_output(servers=tenant.servers)
raise
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_in_tenant_traffic(self):
try:
self._create_tenant_servers(self.primary_tenant, num=1)
# in-tenant check
self._test_in_tenant_block(self.primary_tenant)
self._test_in_tenant_allow(self.primary_tenant)
except Exception:
for tenant in self.tenants.values():
self._log_console_output(servers=tenant.servers)
raise
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_port_update_new_security_group(self):
"""
This test verifies the traffic after updating the vm port with new
security group having appropriate rule.
"""
new_tenant = self.primary_tenant
# Create empty security group and add icmp rule in it
new_sg = self._create_empty_security_group(
namestart='secgroup_new-',
tenant_id=new_tenant.creds.tenant_id,
client=new_tenant.manager.network_client)
icmp_rule = dict(
protocol='icmp',
direction='ingress',
)
self._create_security_group_rule(
secgroup=new_sg,
client=new_tenant.manager.network_client,
**icmp_rule)
new_tenant.security_groups.update(new_sg=new_sg)
# Create server with default security group
name = 'server-{tenant}-gen-1-'.format(
tenant=new_tenant.creds.tenant_name
)
name = data_utils.rand_name(name)
server = self._create_server(name, new_tenant)
# Check connectivity failure with default security group
try:
access_point_ssh = self._connect_to_access_point(new_tenant)
self._check_connectivity(access_point=access_point_ssh,
ip=self._get_server_ip(server),
should_succeed=False)
server_id = server['id']
port_id = self._list_ports(device_id=server_id)[0]['id']
# update port with new security group and check connectivity
self.network_client.update_port(port_id, security_groups=[
new_tenant.security_groups['new_sg'].id])
self._check_connectivity(
access_point=access_point_ssh,
ip=self._get_server_ip(server))
except Exception:
for tenant in self.tenants.values():
self._log_console_output(servers=tenant.servers)
raise
| apache-2.0 |
cctaylor/googleads-python-lib | examples/dfp/v201502/creative_set_service/get_all_creative_sets.py | 3 | 2070 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all creative sets.
To create creative sets, run create_creative_sets.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeSetService.getCreativeSetsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
creative_set_service = client.GetService('CreativeSetService',
version='v201502')
# Create a filter statement.
statement = dfp.FilterStatement()
# Get creative sets by statement.
while True:
response = creative_set_service.getCreativeSetsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for creative_set in response['results']:
print ('Creative set with ID \'%s\' and name \'%s\' was found.'
% (creative_set['id'], creative_set['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 |
Shiroy/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/assertion/util.py | 175 | 11730 | """Utilities for assertion debugging"""
import pprint
import _pytest._code
import py
try:
from collections import Sequence
except ImportError:
Sequence = list
BuiltinAssertionError = py.builtin.builtins.AssertionError
u = py.builtin._totext
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
try:
return u(s, 'utf-8', 'replace')
except TypeError:
return s
def format_explanation(explanation):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
explanation = ecu(explanation)
explanation = _collapse_false(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u('\n').join(result)
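# A small illustration of the mini-language described above (not part of the
# original module): a single nested block becomes a "where" line, assuming the
# helpers below behave as documented, e.g.
#
#   format_explanation("assert x\n{x = foo()\n}")
#   ->  "assert x\n + where x = foo()"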
def _collapse_false(explanation):
"""Collapse expansions of False
So this strips out any "assert False\n{where False = ...\n}"
blocks.
"""
where = 0
while True:
start = where = explanation.find("False\n{False = ", where)
if where == -1:
break
level = 0
prev_c = explanation[start]
for i, c in enumerate(explanation[start:]):
if prev_c + c == "\n{":
level += 1
elif prev_c + c == "\n}":
level -= 1
if not level:
break
prev_c = c
else:
raise AssertionError("unbalanced braces: %r" % (explanation,))
end = start + i
where = end
if explanation[end - 1] == '\n':
explanation = (explanation[:start] + explanation[start+15:end-1] +
explanation[end+1:])
where -= 17
return explanation
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or u('')).split('\n')
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l and l[0] in ['{', '}', '~', '>']:
lines.append(l)
else:
lines[-1] += '\\n' + l
return lines
def _format_lines(lines):
"""Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines.
"""
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = u('and ')
else:
s = u('where ')
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ['~', '>']
stack[-1] += 1
indent = len(stack) if line.startswith('~') else len(stack) - 1
result.append(u(' ')*indent + line[1:])
assert len(stack) == 1
return result
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width/2))
right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
not isinstance(x, basestring))
istext = lambda x: isinstance(x, basestring)
isdict = lambda x: isinstance(x, dict)
isset = lambda x: isinstance(x, (set, frozenset))
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
verbose = config.getoption('verbose')
explanation = None
try:
if op == '==':
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == 'not in':
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except Exception:
explanation = [
u('(pytest_assertion plugin: representation of details failed. '
'Probably an object has a faulty __repr__.)'),
u(_pytest._code.ExceptionInfo())]
if not explanation:
return None
return [summary] + explanation
def _diff_text(left, right, verbose=False):
"""Return the explanation for the diff between text or bytes
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text.
"""
from difflib import ndiff
explanation = []
if isinstance(left, py.builtin.bytes):
left = u(repr(left)[1:-1]).replace(r'\n', '\n')
if isinstance(right, py.builtin.bytes):
right = u(repr(right)[1:-1]).replace(r'\n', '\n')
if not verbose:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [u('Skipping %s identical leading '
'characters in diff, use -v to show') % i]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [u('Skipping %s identical trailing '
'characters in diff, use -v to show') % i]
left = left[:-i]
right = right[:-i]
explanation += [line.strip('\n')
for line in ndiff(left.splitlines(),
right.splitlines())]
return explanation
def _compare_eq_iterable(left, right, verbose=False):
if not verbose:
return [u('Use -v to get the full diff')]
# dynamic import to speedup pytest
import difflib
try:
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = [u('Full diff:')]
except Exception:
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
# sorted() on a list would raise. See issue #718.
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
left_formatting = sorted(repr(x) for x in left)
right_formatting = sorted(repr(x) for x in right)
explanation = [u('Full diff (fallback to calling repr on each item):')]
explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
return explanation
def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
explanation += [u('At index %s diff: %r != %r')
% (i, left[i], right[i])]
break
if len(left) > len(right):
explanation += [u('Left contains more items, first extra item: %s')
% py.io.saferepr(left[len(right)],)]
elif len(left) < len(right):
explanation += [
u('Right contains more items, first extra item: %s') %
py.io.saferepr(right[len(left)],)]
return explanation
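# A minimal sketch of the sequence comparison above (not part of the original
# module): only the first differing index is reported, e.g.
#
#   _compare_eq_sequence([1, 2, 3], [1, 9, 3])
#   ->  [u('At index 1 diff: 2 != 9')]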
def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append(u('Extra items in the left set:'))
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
explanation.append(u('Extra items in the right set:'))
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
def _compare_eq_dict(left, right, verbose=False):
explanation = []
common = set(left).intersection(set(right))
same = dict((k, left[k]) for k in common if left[k] == right[k])
if same and not verbose:
explanation += [u('Omitting %s identical items, use -v to show') %
len(same)]
elif same:
explanation += [u('Common items:')]
explanation += pprint.pformat(same).splitlines()
diff = set(k for k in common if left[k] != right[k])
if diff:
explanation += [u('Differing items:')]
for k in diff:
explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
py.io.saferepr({k: right[k]})]
extra_left = set(left) - set(right)
if extra_left:
explanation.append(u('Left contains more items:'))
explanation.extend(pprint.pformat(
dict((k, left[k]) for k in extra_left)).splitlines())
extra_right = set(right) - set(left)
if extra_right:
explanation.append(u('Right contains more items:'))
explanation.extend(pprint.pformat(
dict((k, right[k]) for k in extra_right)).splitlines())
return explanation
def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index+len(term):]
correct_text = head + tail
diff = _diff_text(correct_text, text, verbose)
newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
for line in diff:
if line.startswith(u('Skipping')):
continue
if line.startswith(u('- ')):
continue
if line.startswith(u('+ ')):
newdiff.append(u(' ') + line[2:])
else:
newdiff.append(line)
return newdiff
| mpl-2.0 |
etingof/pyasn1-modules | tests/test_rfc5958.py | 2 | 2860 | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5958
from pyasn1_modules import rfc8410
class PrivateKeyTestCase(unittest.TestCase):
priv_key_pem_text = """\
MHICAQEwBQYDK2VwBCIEINTuctv5E1hK1bbY8fdp+K06/nwoy/HU++CXqI9EdVhC
oB8wHQYKKoZIhvcNAQkJFDEPDA1DdXJkbGUgQ2hhaXJzgSEAGb9ECWmEzf6FQbrB
Z9w7lshQhqowtrbLDFw4rXAxZuE=
"""
def setUp(self):
self.asn1Spec = rfc5958.PrivateKeyInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.priv_key_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(
rfc8410.id_Ed25519, asn1Object['privateKeyAlgorithm']['algorithm'])
self.assertTrue(asn1Object['privateKey'].isValue)
self.assertEqual(
"0x0420d4ee", asn1Object['privateKey'].prettyPrint()[0:10])
self.assertTrue(asn1Object['publicKey'].isValue)
self.assertEqual(
"1164575857", asn1Object['publicKey'].prettyPrint()[0:10])
self.assertEqual(substrate, der_encoder(asn1Object))
class PrivateKeyOpenTypesTestCase(unittest.TestCase):
asymmetric_key_pkg_pem_text = """\
MIGEBgpghkgBZQIBAk4FoHYwdDByAgEBMAUGAytlcAQiBCDU7nLb+RNYStW22PH3
afitOv58KMvx1Pvgl6iPRHVYQqAfMB0GCiqGSIb3DQEJCRQxDwwNQ3VyZGxlIENo
YWlyc4EhABm/RAlphM3+hUG6wWfcO5bIUIaqMLa2ywxcOK1wMWbh
"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.asymmetric_key_pkg_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertIn(
rfc5958.id_ct_KP_aKeyPackage, rfc5652.cmsContentTypesMap)
oneKey = asn1Object['content'][0]
self.assertEqual(
rfc8410.id_Ed25519, oneKey['privateKeyAlgorithm']['algorithm'])
pkcs_9_at_friendlyName = univ.ObjectIdentifier('1.2.840.113549.1.9.9.20')
self.assertEqual(
pkcs_9_at_friendlyName, oneKey['attributes'][0]['attrType'])
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| bsd-2-clause |
goldcoin/gldcoin | BuildDeps/deps/boost/tools/quickbook/test/python/output-deps.py | 6 | 4511 | #!/usr/bin/env python
import sys, os, subprocess, tempfile, re
def main(args, directory):
if len(args) != 1:
print "Usage: output-deps.py quickbook-command"
exit(1)
quickbook_command = args[0]
failures = 0
failures += run_quickbook(quickbook_command, 'svg_missing.qbk',
deps_gold = 'svg_missing_deps.txt')
failures += run_quickbook(quickbook_command, 'svg_missing.qbk',
locations_gold = 'svg_missing_locs.txt')
failures += run_quickbook(quickbook_command, 'missing_relative.qbk',
deps_gold = 'missing_relative_deps.txt',
locations_gold = 'missing_relative_locs.txt')
failures += run_quickbook(quickbook_command, 'include_path.qbk',
deps_gold = 'include_path_deps.txt',
locations_gold = 'include_path_locs.txt',
input_path = ['sub1', 'sub2'])
if failures == 0:
print "Success"
else:
print "Failures:",failures
exit(failures)
def run_quickbook(quickbook_command, filename, output_gold = None,
deps_gold = None, locations_gold = None, input_path = []):
failures = 0
command = [quickbook_command, '--debug', filename]
output_filename = None
if output_gold:
output_filename = temp_filename('.qbk')
command.extend(['--output-file', output_filename])
deps_filename = None
if deps_gold:
deps_filename = temp_filename('.txt')
command.extend(['--output-deps', deps_filename])
locations_filename = None
if locations_gold:
locations_filename = temp_filename('.txt')
command.extend(['--output-checked-locations', locations_filename])
try:
for path in input_path:
command.extend(['-I', path])
print 'Running: ' + ' '.join(command)
print
exit_code = subprocess.call(command)
print
success = not exit_code
if output_filename:
output = load_file(output_filename)
else:
output = None
if deps_filename:
deps = load_dependencies(deps_filename)
else:
deps = None
if locations_filename:
locations = load_locations(locations_filename)
else:
locations = None
finally:
if output_filename: os.unlink(output_filename)
if deps_filename: os.unlink(deps_filename)
if deps_gold:
gold = load_dependencies(deps_gold, adjust_paths = True)
if deps != gold:
failures = failures + 1
print "Dependencies don't match:"
print "Gold:", gold
print "Result:", deps
print
if locations_gold:
gold = load_locations(locations_gold, adjust_paths = True)
if locations != gold:
failures = failures + 1
print "Dependencies don't match:"
print "Gold:", gold
print "Result:", locations
print
if output_gold:
gold = load_file(output_gold)
if gold != output:
failures = failures + 1
print "Output doesn't match:"
print
print gold
print
print output
print
return failures
def load_dependencies(filename, adjust_paths = False):
dependencies = set()
f = open(filename, 'r')
for path in f:
if adjust_paths:
path = os.path.realpath(path)
if path in dependencies:
raise Exception("Duplicate path (%1s) in %2s" % (path, filename))
dependencies.add(path)
return dependencies
def load_locations(filename, adjust_paths = False):
line_matcher = re.compile("^([+-]) (.*)$")
dependencies = {}
f = open(filename, 'r')
for line in f:
m = line_matcher.match(line)
if not m:
raise Exception("Invalid dependency file: %1s" % filename)
found = m.group(1) == '+'
path = m.group(2)
if adjust_paths:
path = os.path.realpath(path)
if path in dependencies:
raise Exception("Duplicate path (%1s) in %2s" % (path, filename))
dependencies[path] = found
return dependencies
def temp_filename(extension):
file = tempfile.mkstemp(suffix = extension)
os.close(file[0])
return file[1]
def load_file(filename):
f = open(filename, 'r')
try:
return f.read()
finally:
f.close()
return None
main(sys.argv[1:], os.path.dirname(sys.argv[0]))
| mit |
sudosurootdev/external_chromium_org | tools/telemetry/telemetry/core/browser_credentials_unittest.py | 47 | 2272 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import tempfile
import unittest
from telemetry.core import browser_credentials
SIMPLE_CREDENTIALS_STRING = """
{
"google": {
"username": "example",
"password": "asdf"
}
}
"""
class BackendStub(object):
def __init__(self, credentials_type):
self.login_needed_called = None
self.login_no_longer_needed_called = None
self.credentials_type = credentials_type
def LoginNeeded(self, config, _, tab):
self.login_needed_called = (config, tab)
return True
def LoginNoLongerNeeded(self, tab):
self.login_no_longer_needed_called = (tab, )
class TestBrowserCredentials(unittest.TestCase):
def testCredentialsInfrastructure(self):
google_backend = BackendStub("google")
othersite_backend = BackendStub("othersite")
browser_cred = browser_credentials.BrowserCredentials(
[google_backend,
othersite_backend])
try:
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(SIMPLE_CREDENTIALS_STRING)
browser_cred.credentials_path = f.name
      # Should be true because it has a password and a backend.
self.assertTrue(browser_cred.CanLogin('google'))
      # Should be false because it has no password.
self.assertFalse(browser_cred.CanLogin('othersite'))
# Should fail because it has no backend.
self.assertRaises(
Exception,
lambda: browser_cred.CanLogin('foobar'))
tab = {}
ret = browser_cred.LoginNeeded(tab, 'google')
self.assertTrue(ret)
self.assertTrue(google_backend.login_needed_called is not None)
self.assertEqual(tab, google_backend.login_needed_called[0])
self.assertEqual("example",
google_backend.login_needed_called[1]["username"])
self.assertEqual("asdf",
google_backend.login_needed_called[1]["password"])
browser_cred.LoginNoLongerNeeded(tab, 'google')
self.assertTrue(google_backend.login_no_longer_needed_called is not None)
self.assertEqual(tab, google_backend.login_no_longer_needed_called[0])
finally:
os.remove(f.name)
| bsd-3-clause |
ArianaGashi/Techstitution | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py | 329 | 13942 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
listElementsMap = {
None: (frozenset(scopingElements), False),
"button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
"list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")])), False),
"table": (frozenset([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select": (frozenset([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
class Node(object):
def __init__(self, name):
"""Node representing an item in the tree.
name - The tag name associated with the node
parent - The parent of the current node (or None for the document node)
value - The value of the current node (applies to text nodes and
            comments)
attributes - a dict holding name, value pairs for attributes of the node
childNodes - a list of child nodes of the current node. This must
include all elements but not necessarily other node types
_flags - A list of miscellaneous flags that can be set on the node
"""
self.name = name
self.parent = None
self.value = None
self.attributes = {}
self.childNodes = []
self._flags = []
def __str__(self):
attributesStr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in
self.attributes.items()])
if attributesStr:
return "<%s %s>" % (self.name, attributesStr)
else:
return "<%s>" % (self.name)
def __repr__(self):
return "<%s>" % (self.name)
def appendChild(self, node):
"""Insert node as a child of the current node
"""
raise NotImplementedError
def insertText(self, data, insertBefore=None):
"""Insert data as text in the current node, positioned before the
start of node insertBefore or to the end of the node's text.
"""
raise NotImplementedError
def insertBefore(self, node, refNode):
"""Insert node as a child of the current node, before refNode in the
list of child nodes. Raises ValueError if refNode is not a child of
the current node"""
raise NotImplementedError
def removeChild(self, node):
"""Remove node from the children of the current node
"""
raise NotImplementedError
def reparentChildren(self, newParent):
"""Move all the children of the current node to newParent.
This is needed so that trees that don't store text as nodes move the
text in the correct way
"""
# XXX - should this method be made more general?
for child in self.childNodes:
newParent.appendChild(child)
self.childNodes = []
def cloneNode(self):
"""Return a shallow copy of the current node i.e. a node with the same
name and attributes but with no parent or child nodes
"""
raise NotImplementedError
def hasContent(self):
"""Return true if the node has children or text, false otherwise
"""
raise NotImplementedError
class ActiveFormattingElements(list):
def append(self, node):
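        # HTML5 "Noah's Ark clause": at most three entries with the same tag
        # name, namespace and attributes may exist between the end of the list
        # and the last marker, so the earliest such entry is removed before the
        # new node is appended.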
equalCount = 0
if node != Marker:
for element in self[::-1]:
if element == Marker:
break
if self.nodesEqual(element, node):
equalCount += 1
if equalCount == 3:
self.remove(element)
break
list.append(self, node)
def nodesEqual(self, node1, node2):
if not node1.nameTuple == node2.nameTuple:
return False
if not node1.attributes == node2.attributes:
return False
return True
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
# pylint:disable=not-callable
# Document class
documentClass = None
# The class to use for creating a node
elementClass = None
# The class to use for creating comments
commentClass = None
# The class to use for creating doctypes
doctypeClass = None
# Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
# XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
# If we pass a node in we match that. if we pass a string
# match any node with that name
exactNode = hasattr(target, "nameTuple")
if not exactNode:
if isinstance(target, text_type):
target = (namespaces["html"], target)
assert isinstance(target, tuple)
listElements, invert = listElementsMap[variant]
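        # listElementsMap supplies the scope-delimiting elements plus an
        # "invert" flag; for the "select" variant the set lists the elements
        # that do NOT terminate the scope, hence the XOR below.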
for node in reversed(self.openElements):
if exactNode and node == target:
return True
elif not exactNode and node.nameTuple == target:
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
# This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() # Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type": "StartTag",
"name": clone.name,
"namespace": clone.namespace,
"data": clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert isinstance(name, text_type), "Element %s not unicode" % name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable = None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and
name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
# assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
"""Serialize the subtree of node in the format required by unit tests
node - the node from which to start serializing"""
raise NotImplementedError
| cc0-1.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/autobuilder/buildsteps/BuildToolchainImages.py | 3 | 1532 | '''
Created on Jan 6, 2013
__author__ = "Elizabeth 'pidge' Flanagan"
__copyright__ = "Copyright 2012-2013, Intel Corp."
__credits__ = ["Elizabeth Flanagan"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Elizabeth Flanagan"
__email__ = "[email protected]"
'''
from buildbot.steps.shell import ShellCommand
from buildbot.process.buildstep import LogLineObserver
import os
class BuildToolchainImages(ShellCommand):
haltOnFailure = False
flunkOnFailure = True
name = "Building Toolchain Images"
def __init__(self, factory, argdict=None, **kwargs):
self.images=""
self._pendingLogObservers = []
self.factory = factory
for k, v in argdict.iteritems():
setattr(self, k, v)
# Timeout needs to be passed to LoggingBuildStep as a kwarg
self.timeout = 100000
kwargs['timeout']=self.timeout
ShellCommand.__init__(self, **kwargs)
def start(self):
layerversion = self.getProperty("layerversion_core")
if int(layerversion) > 1:
self.description = ["Building core-image-sato -c populate_sdk"]
self.command = ". ./oe-init-build-env; bitbake core-image-sato -c populate_sdk"
else:
self.description = ["Building meta-toolchain-gmae"]
self.command = ". ./oe-init-build-env; bitbake meta-toolchain-gmae"
ShellCommand.start(self)
def describe(self, done=False):
description = ShellCommand.describe(self,done)
return description
| gpl-2.0 |
andersonresende/django | django/contrib/gis/gdal/srs.py | 11 | 12004 | """
The Spatial Reference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
srs_type = 'user'
if isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr and capi:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
        and will automatically determine whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
            name = name.decode()
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr and capi:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
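# Illustrative usage (a sketch, not part of the original module; the SRIDs are
# just the examples already used in the module docstring):
#
#     wgs84 = SpatialReference('WGS84')
#     texas = SpatialReference(32140)
#     ct = CoordTransform(wgs84, texas)
#     # ct can then be passed to GEOSGeometry.transform() to reproject geometries.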
| bsd-3-clause |
stefanseefeld/qmtest | qm/test/classes/previous_testrun.py | 2 | 1816 | ########################################################################
#
# File: previous_testrun.py
# Author: Stefan Seefeld
# Date: 2006-11-16
#
# Contents:
# QMTest PreviousTestRun extension class.
#
# Copyright (c) 2006 by CodeSourcery, Inc. All rights reserved.
#
# For license terms see the file COPYING.
#
########################################################################
########################################################################
# Imports
########################################################################
from qm.fields import TextField, PythonField
from qm.test.expectation_database import ExpectationDatabase
from qm.test.result import Result
from qm.test.base import load_results
########################################################################
# Classes
########################################################################
class PreviousTestRun(ExpectationDatabase):
"""A 'PreviousTestRun' uses test results as expectations.
    A 'PreviousTestRun' uses a ResultsFile, such as one generated by
'qmtest run' to determine the expected outcome for the current
test run."""
file_name = TextField(description="The name of the results file.")
results_file = PythonField("The results file.")
def __init__(self, **args):
super(PreviousTestRun, self).__init__(**args)
if not self.results_file:
self.results_file = open(self.file_name, "rb")
results = load_results(self.results_file, self.test_database)
self._results = {}
for r in results:
# Keep test results only.
if r.GetKind() == Result.TEST:
self._results[r.GetId()] = r
def Lookup(self, test_id):
return self._results.get(test_id) or Result(Result.TEST, test_id)
| gpl-2.0 |
mark-me/Pi-Jukebox | venv/Lib/site-packages/pygame/freetype.py | 1 | 1814 | """Enhanced Pygame module for loading and rendering computer fonts"""
import sys
from pygame._freetype import (
Font,
STYLE_NORMAL, STYLE_OBLIQUE, STYLE_STRONG, STYLE_UNDERLINE, STYLE_WIDE,
STYLE_DEFAULT,
init, quit, get_init,
was_init, get_cache_size, get_default_font, get_default_resolution,
get_error, get_version, set_default_resolution,
_PYGAME_C_API, __PYGAMEinit__,
)
from pygame.sysfont import match_font, get_fonts, SysFont as _SysFont
from pygame import compat
def SysFont(name, size, bold=0, italic=0, constructor=None):
"""pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font
create a pygame Font from system font resources
This will search the system fonts for the given font
name. You can also enable bold or italic styles, and
the appropriate system font will be selected if available.
This will always return a valid Font object, and will
fallback on the builtin pygame font if the given font
is not found.
Name can also be a comma separated list of names, in
    which case the set of names will be searched in order. Pygame
    uses a small set of common font aliases; if the specific
font you ask for is not available, a reasonable alternative
may be used.
    if the optional constructor argument is provided, it must be a function with
signature constructor(fontpath, size, bold, italic) which returns
a Font instance. If None, a pygame.freetype.Font object is created.
"""
if constructor is None:
def constructor(fontpath, size, bold, italic):
font = Font(fontpath, size)
font.strong = bold
font.oblique = italic
return font
return _SysFont(name, size, bold, italic, constructor)
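# Illustrative usage (a sketch, not part of the original module; the font names
# and size are arbitrary examples):
#
#     font = SysFont("arial,freesans", 24, bold=True)
#
#     def make_font(fontpath, size, bold, italic):
#         f = Font(fontpath, size)
#         f.strong = bold
#         f.oblique = italic
#         return f
#
#     font = SysFont("arial", 24, constructor=make_font)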
| agpl-3.0 |
krageon/closure-linter | closure_linter/common/tokenizer.py | 127 | 5827 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based lexer."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
from closure_linter.common import tokens
# Shorthand
Type = tokens.TokenType
class Tokenizer(object):
"""General purpose tokenizer.
Attributes:
mode: The latest mode of the tokenizer. This allows patterns to distinguish
if they are mid-comment, mid-parameter list, etc.
matchers: Dictionary of modes to sequences of matchers that define the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
def __init__(self, starting_mode, matchers, default_types):
"""Initialize the tokenizer.
Args:
starting_mode: Mode to start in.
matchers: Dictionary of modes to sequences of matchers that defines the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
self.__starting_mode = starting_mode
self.matchers = matchers
self.default_types = default_types
def TokenizeFile(self, file):
"""Tokenizes the given file.
Args:
file: An iterable that yields one line of the file at a time.
Returns:
The first token in the file
"""
# The current mode.
self.mode = self.__starting_mode
# The first token in the stream.
self.__first_token = None
# The last token added to the token stream.
self.__last_token = None
# The current line number.
self.__line_number = 0
for line in file:
self.__line_number += 1
self.__TokenizeLine(line)
return self.__first_token
def _CreateToken(self, string, token_type, line, line_number, values=None):
"""Creates a new Token object (or subclass).
Args:
string: The string of input the token represents.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
Returns:
The newly created Token object.
"""
return tokens.Token(string, token_type, line, line_number, values,
line_number)
def __TokenizeLine(self, line):
"""Tokenizes the given line.
Args:
line: The contents of the line.
"""
string = line.rstrip('\n\r\f')
line_number = self.__line_number
self.__start_index = 0
if not string:
self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
return
normal_token = ''
index = 0
while index < len(string):
for matcher in self.matchers[self.mode]:
if matcher.line_start and index > 0:
continue
match = matcher.regex.match(string, index)
if match:
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line,
line_number))
normal_token = ''
# Add the match.
self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
line_number, match.groupdict()))
# Change the mode to the correct one for after this match.
self.mode = matcher.result_mode or self.mode
# Shorten the string to be matched.
index = match.end()
break
else:
# If the for loop finishes naturally (i.e. no matches) we just add the
# first character to the string of consecutive non match characters.
# These will constitute a NORMAL token.
if string:
normal_token += string[index:index + 1]
index += 1
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line, line_number))
def __CreateNormalToken(self, mode, string, line, line_number):
"""Creates a normal token.
Args:
mode: The current mode.
string: The string to tokenize.
line: The line of text.
line_number: The line number within the file.
Returns:
A Token object, of the default type for the current mode.
"""
type = Type.NORMAL
if mode in self.default_types:
type = self.default_types[mode]
return self._CreateToken(string, type, line, line_number)
def __AddToken(self, token):
"""Add the given token to the token stream.
Args:
token: The token to add.
"""
# Store the first token, or point the previous token to this one.
if not self.__first_token:
self.__first_token = token
else:
self.__last_token.next = token
# Establish the doubly linked list
token.previous = self.__last_token
self.__last_token = token
# Compute the character indices
token.start_index = self.__start_index
self.__start_index += token.length
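# Illustrative usage (a sketch, not part of the original module). A matcher only
# needs the attributes read above (regex, type, result_mode, line_start); the
# "Matcher" namedtuple and the 'code' mode below are invented for the example:
#
#     import collections, re
#     Matcher = collections.namedtuple('Matcher', 'regex type result_mode line_start')
#     word = Matcher(re.compile(r'[A-Za-z_]\w*'), Type.NORMAL, None, False)
#     tokenizer = Tokenizer('code', {'code': [word]}, {'code': Type.NORMAL})
#     first_token = tokenizer.TokenizeFile(['var x = 1\n'])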
| apache-2.0 |
wangpanjun/doit | tests/test_filewatch.py | 5 | 3473 | import os
import time
import threading
import pytest
from doit.filewatch import FileModifyWatcher, get_platform_system
def testUnsuportedPlatform(monkeypatch):
monkeypatch.setattr(FileModifyWatcher, 'supported_platforms', ())
pytest.raises(Exception, FileModifyWatcher, [])
platform = get_platform_system()
@pytest.mark.skipif('platform not in FileModifyWatcher.supported_platforms')
class TestFileWatcher(object):
def testInit(self, restore_cwd, tmpdir):
dir1 = 'data3'
files = ('data/w1.txt', 'data/w2.txt')
tmpdir.mkdir('data')
for fname in files:
tmpdir.join(fname).open('a').close()
os.chdir(tmpdir.strpath)
fw = FileModifyWatcher((files[0], files[1], dir1))
# file_list contains absolute paths
assert 2 == len(fw.file_list)
assert os.path.abspath(files[0]) in fw.file_list
assert os.path.abspath(files[1]) in fw.file_list
# watch_dirs
assert 2 == len(fw.watch_dirs)
assert tmpdir.join('data') in fw.watch_dirs
assert tmpdir.join('data3') in fw.watch_dirs
# notify_dirs
assert 1 == len(fw.notify_dirs)
assert tmpdir.join('data3') in fw.notify_dirs
def testHandleEventNotSubclassed(self):
fw = FileModifyWatcher([])
pytest.raises(NotImplementedError, fw.handle_event, None)
def testLoop(self, restore_cwd, tmpdir):
files = ['data/w1.txt', 'data/w2.txt', 'data/w3.txt']
stop_file = 'data/stop'
tmpdir.mkdir('data')
for fname in files + [stop_file]:
tmpdir.join(fname).open('a').close()
os.chdir(tmpdir.strpath)
fw = FileModifyWatcher((files[0], files[1], stop_file))
events = []
should_stop = []
started = []
def handle_event(event):
events.append(event.pathname)
if event.pathname.endswith("stop"):
should_stop.append(True)
fw.handle_event = handle_event
def loop_callback(notifier):
started.append(True)
# force loop to stop
if should_stop:
raise KeyboardInterrupt
loop_thread = threading.Thread(target=fw.loop, args=(loop_callback,))
loop_thread.daemon = True
loop_thread.start()
# wait watcher to be ready
while not started: # pragma: no cover
time.sleep(0.01)
assert loop_thread.isAlive()
# write in watched file
fd = open(files[0], 'w')
fd.write("hi")
fd.close()
# write in non-watched file
fd = open(files[2], 'w')
fd.write("hi")
fd.close()
# write in another watched file
fd = open(files[1], 'w')
fd.write("hi")
fd.close()
# tricky to stop watching
fd = open(stop_file, 'w')
fd.write("hi")
fd.close()
time.sleep(0.1)
loop_thread.join(1)
if loop_thread.isAlive(): # pragma: no cover
# this test is very flaky so we give it one more chance...
# write on file to terminate thread
fd = open(stop_file, 'w')
fd.write("hi")
fd.close()
loop_thread.join(1)
if loop_thread.is_alive(): # pragma: no cover
raise Exception("thread not terminated")
assert os.path.abspath(files[0]) == events[0]
assert os.path.abspath(files[1]) == events[1]
| mit |
fjbatresv/odoo | addons/google_account/__openerp__.py | 312 | 1457 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Users',
'version': '1.0',
'category': 'Tools',
'description': """
The module adds google user in res user.
========================================
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['base_setup'],
'data': [
'google_account_data.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bonitadecker77/python-for-android | python3-alpha/python3-src/Lib/xml/dom/domreg.py | 51 | 3463 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
from xml.dom.minicompat import * # isinstance, StringTypes
# This is a list of well-known implementations. Well-known names
# should be published by posting to [email protected], and are
# subsequently recorded in this file.
well_known_implementations = {
'minidom':'xml.dom.minidom',
'4DOM': 'xml.dom.DOMImplementation',
}
# DOM implementations not officially registered should register
# themselves with their
registered = {}
def registerDOMImplementation(name, factory):
"""registerDOMImplementation(name, factory)
Register the factory function with the name. The factory function
should return an object which implements the DOMImplementation
interface. The factory function can either return the same object,
or a new one (e.g. if that implementation supports some
customization)."""
registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name=None, features=()):
"""getDOMImplementation(name = None, features = ()) -> DOM implementation.
Return a suitable DOM implementation. The name is either
well-known, the module name of a DOM implementation, or None. If
it is not None, imports the corresponding module and returns
DOMImplementation object if the import succeeds.
If name is not given, consider the available implementations to
find one with the required feature set. If no implementation can
be found, raise an ImportError. The features list must be a sequence
of (feature, version) pairs which are passed to hasFeature."""
import os
creator = None
mod = well_known_implementations.get(name)
if mod:
mod = __import__(mod, {}, {}, ['getDOMImplementation'])
return mod.getDOMImplementation()
elif name:
return registered[name]()
elif "PYTHON_DOM" in os.environ:
return getDOMImplementation(name = os.environ["PYTHON_DOM"])
# User did not specify a name, try implementations in arbitrary
# order, returning the one that has the required features
if isinstance(features, str):
features = _parse_feature_string(features)
for creator in registered.values():
dom = creator()
if _good_enough(dom, features):
return dom
for creator in well_known_implementations.keys():
try:
dom = getDOMImplementation(name = creator)
except Exception: # typically ImportError, or AttributeError
continue
if _good_enough(dom, features):
return dom
raise ImportError("no suitable DOM implementation found")
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError("bad feature name: %r" % (feature,))
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
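# Illustrative usage (a sketch, not part of the original module):
#
#     impl = getDOMImplementation()                          # e.g. minidom by default
#     impl = getDOMImplementation(features=[("core", "2.0")])
#     doc = impl.createDocument(None, "root", None)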
| apache-2.0 |
pshen/ansible | contrib/vault/vault-keyring.py | 119 | 3430 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2014, Matt Martz <[email protected]>
# (c) 2016, Justin Mayer <https://justinmayer.com/>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# =============================================================================
#
# This script is to be used with vault_password_file or --vault-password-file
# to retrieve the vault password via your OS's native keyring application.
#
# This file *MUST* be saved with executable permissions. Otherwise, Ansible
# will try to parse as a password file and display: "ERROR! Decryption failed"
#
# The `keyring` Python module is required: https://pypi.python.org/pypi/keyring
#
# By default, this script will store the specified password in the keyring of
# the user that invokes the script. To specify a user keyring, add a [vault]
# section to your ansible.cfg file with a 'username' option. Example:
#
# [vault]
# username = 'ansible-vault'
#
# Another optional setting is for the key name, which allows you to use this
# script to handle multiple project vaults with different passwords:
#
# [vault]
# keyname = 'ansible-vault-yourproject'
#
# You can configure the `vault_password_file` option in ansible.cfg:
#
# [defaults]
# ...
# vault_password_file = /path/to/vault-keyring.py
# ...
#
# To set your password, `cd` to your project directory and run:
#
# python /path/to/vault-keyring.py set
#
# If you choose not to configure the path to `vault_password_file` in
# ansible.cfg, your `ansible-playbook` command might look like:
#
# ansible-playbook --vault-password-file=/path/to/vault-keyring.py site.yml
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
import sys
import getpass
import keyring
import ansible.constants as C
def main():
(parser, config_path) = C.load_config_file()
if parser.has_option('vault', 'username'):
username = parser.get('vault', 'username')
else:
username = getpass.getuser()
if parser.has_option('vault', 'keyname'):
keyname = parser.get('vault', 'keyname')
else:
keyname = 'ansible'
if len(sys.argv) == 2 and sys.argv[1] == 'set':
intro = 'Storing password in "{}" user keyring using key name: {}\n'
sys.stdout.write(intro.format(username, keyname))
password = getpass.getpass()
confirm = getpass.getpass('Confirm password: ')
if password == confirm:
keyring.set_password(keyname, username, password)
else:
sys.stderr.write('Passwords do not match\n')
sys.exit(1)
else:
sys.stdout.write('{}\n'.format(keyring.get_password(keyname,
username)))
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-3.0 |
bdrung/audacity | lib-src/lv2/serd/waflib/Runner.py | 330 | 4483 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import random,atexit
try:
from queue import Queue
except ImportError:
from Queue import Queue
from waflib import Utils,Task,Errors,Logs
GAP=10
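# GAP bounds how far the producer runs ahead of the consumers: refill_task_list
# stops handing out new tasks and collects finished ones once more than
# numjobs * GAP tasks are in flight.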
class TaskConsumer(Utils.threading.Thread):
def __init__(self):
Utils.threading.Thread.__init__(self)
self.ready=Queue()
self.setDaemon(1)
self.start()
def run(self):
try:
self.loop()
except Exception:
pass
def loop(self):
while 1:
tsk=self.ready.get()
if not isinstance(tsk,Task.TaskBase):
tsk(self)
else:
tsk.process()
pool=Queue()
def get_pool():
try:
return pool.get(False)
except Exception:
return TaskConsumer()
def put_pool(x):
pool.put(x)
def _free_resources():
global pool
lst=[]
while pool.qsize():
lst.append(pool.get())
for x in lst:
x.ready.put(None)
for x in lst:
x.join()
pool=None
atexit.register(_free_resources)
class Parallel(object):
def __init__(self,bld,j=2):
self.numjobs=j
self.bld=bld
self.outstanding=[]
self.frozen=[]
self.out=Queue(0)
self.count=0
self.processed=1
self.stop=False
self.error=[]
self.biter=None
self.dirty=False
def get_next_task(self):
if not self.outstanding:
return None
return self.outstanding.pop(0)
def postpone(self,tsk):
if random.randint(0,1):
self.frozen.insert(0,tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
while self.count>self.numjobs*GAP:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
elif self.frozen:
try:
cond=self.deadlock==self.processed
except AttributeError:
pass
else:
if cond:
msg='check the build order for the tasks'
for tsk in self.frozen:
if not tsk.run_after:
msg='check the methods runnable_status'
break
lst=[]
for tsk in self.frozen:
lst.append('%s\t-> %r'%(repr(tsk),[id(x)for x in tsk.run_after]))
raise Errors.WafError('Deadlock detected: %s%s'%(msg,''.join(lst)))
self.deadlock=self.processed
if self.frozen:
self.outstanding+=self.frozen
self.frozen=[]
elif not self.count:
self.outstanding.extend(self.biter.next())
self.total=self.bld.total()
break
def add_more_tasks(self,tsk):
if getattr(tsk,'more_tasks',None):
self.outstanding+=tsk.more_tasks
self.total+=len(tsk.more_tasks)
def get_out(self):
tsk=self.out.get()
if not self.stop:
self.add_more_tasks(tsk)
self.count-=1
self.dirty=True
return tsk
def error_handler(self,tsk):
if not self.bld.keep:
self.stop=True
self.error.append(tsk)
def add_task(self,tsk):
try:
self.pool
except AttributeError:
self.init_task_pool()
self.ready.put(tsk)
def init_task_pool(self):
pool=self.pool=[get_pool()for i in range(self.numjobs)]
self.ready=Queue(0)
def setq(consumer):
consumer.ready=self.ready
for x in pool:
x.ready.put(setq)
return pool
def free_task_pool(self):
def setq(consumer):
consumer.ready=Queue(0)
self.out.put(self)
try:
pool=self.pool
except AttributeError:
pass
else:
for x in pool:
self.ready.put(setq)
for x in pool:
self.get_out()
for x in pool:
put_pool(x)
self.pool=[]
def start(self):
self.total=self.bld.total()
while not self.stop:
self.refill_task_list()
tsk=self.get_next_task()
if not tsk:
if self.count:
continue
else:
break
if tsk.hasrun:
self.processed+=1
continue
if self.stop:
break
try:
st=tsk.runnable_status()
except Exception:
self.processed+=1
tsk.err_msg=Utils.ex_stack()
if not self.stop and self.bld.keep:
tsk.hasrun=Task.SKIPPED
if self.bld.keep==1:
if Logs.verbose>1 or not self.error:
self.error.append(tsk)
self.stop=True
else:
if Logs.verbose>1:
self.error.append(tsk)
continue
tsk.hasrun=Task.EXCEPTION
self.error_handler(tsk)
continue
if st==Task.ASK_LATER:
self.postpone(tsk)
elif st==Task.SKIP_ME:
self.processed+=1
tsk.hasrun=Task.SKIPPED
self.add_more_tasks(tsk)
else:
tsk.position=(self.processed,self.total)
self.count+=1
tsk.master=self
self.processed+=1
if self.numjobs==1:
tsk.process()
else:
self.add_task(tsk)
while self.error and self.count:
self.get_out()
assert(self.count==0 or self.stop)
self.free_task_pool()
| gpl-2.0 |
HydrelioxGitHub/home-assistant | homeassistant/components/neato/switch.py | 2 | 3290 | """Support for Neato Connected Vacuums switches."""
import logging
from datetime import timedelta
import requests
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.components.neato import NEATO_ROBOTS, NEATO_LOGIN
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['neato']
SCAN_INTERVAL = timedelta(minutes=10)
SWITCH_TYPE_SCHEDULE = 'schedule'
SWITCH_TYPES = {
SWITCH_TYPE_SCHEDULE: ['Schedule']
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Neato switches."""
dev = []
for robot in hass.data[NEATO_ROBOTS]:
for type_name in SWITCH_TYPES:
dev.append(NeatoConnectedSwitch(hass, robot, type_name))
_LOGGER.debug("Adding switches %s", dev)
add_entities(dev)
class NeatoConnectedSwitch(ToggleEntity):
"""Neato Connected Switches."""
def __init__(self, hass, robot, switch_type):
"""Initialize the Neato Connected switches."""
self.type = switch_type
self.robot = robot
self.neato = hass.data[NEATO_LOGIN]
self._robot_name = '{} {}'.format(
self.robot.name, SWITCH_TYPES[self.type][0])
try:
self._state = self.robot.state
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as ex:
_LOGGER.warning("Neato connection error: %s", ex)
self._state = None
self._schedule_state = None
self._clean_state = None
self._robot_serial = self.robot.serial
def update(self):
"""Update the states of Neato switches."""
_LOGGER.debug("Running switch update")
self.neato.update_robots()
try:
self._state = self.robot.state
except (requests.exceptions.ConnectionError,
requests.exceptions.HTTPError) as ex:
_LOGGER.warning("Neato connection error: %s", ex)
self._state = None
return
_LOGGER.debug('self._state=%s', self._state)
if self.type == SWITCH_TYPE_SCHEDULE:
_LOGGER.debug("State: %s", self._state)
if self._state['details']['isScheduleEnabled']:
self._schedule_state = STATE_ON
else:
self._schedule_state = STATE_OFF
_LOGGER.debug("Schedule state: %s", self._schedule_state)
@property
def name(self):
"""Return the name of the switch."""
return self._robot_name
@property
def available(self):
"""Return True if entity is available."""
return self._state
@property
def unique_id(self):
"""Return a unique ID."""
return self._robot_serial
@property
def is_on(self):
"""Return true if switch is on."""
if self.type == SWITCH_TYPE_SCHEDULE:
if self._schedule_state == STATE_ON:
return True
return False
def turn_on(self, **kwargs):
"""Turn the switch on."""
if self.type == SWITCH_TYPE_SCHEDULE:
self.robot.enable_schedule()
def turn_off(self, **kwargs):
"""Turn the switch off."""
if self.type == SWITCH_TYPE_SCHEDULE:
self.robot.disable_schedule()
| apache-2.0 |
wweiradio/zulip | zerver/lib/statistics.py | 115 | 4695 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from zerver.models import UserProfile, UserActivity, UserActivityInterval, Message
from django.utils.timezone import utc
from datetime import timedelta
from itertools import chain
def median(data):
data = sorted(data)
size = len(data)
if size % 2 == 1:
return data[size//2]
else:
before = size//2 - 1
after = size//2
return (data[before] + data[after]) / 2.0
users_who_sent_query = Message.objects.select_related("sender") \
.exclude(sending_client__name__contains="mirror") \
.exclude(sending_client__name__contains="API")
def active_users():
# Return a list of active users we want to count towards various
# statistics.
return UserProfile.objects.filter(is_bot=False, is_active=True).select_related()
def users_who_sent_between(begin, end):
sender_objs = users_who_sent_query.filter(pub_date__gt=begin, pub_date__lt=end) \
.values("sender__id")
return set(s["sender__id"] for s in sender_objs)
def users_who_sent_ever():
return set(s["sender__id"] for s in users_who_sent_query.values("sender__id"))
def active_users_to_measure():
senders = users_who_sent_ever()
return [u for u in active_users() if u.id in senders]
def active_users_who_sent_between(begin, end):
senders = users_who_sent_between(begin, end)
return [u for u in active_users() if u.id in senders]
# Return the amount of Zulip usage for this user between the two
# given dates
def seconds_usage_between(user_profile, begin, end):
intervals = UserActivityInterval.objects.filter(user_profile=user_profile, end__gte=begin, start__lte=end)
duration = timedelta(0)
for interval in intervals:
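        # Clip each recorded activity interval to the [begin, end] window so
        # that only the overlapping portion counts towards the total.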
start = max(begin, interval.start)
finish = min(end, interval.end)
duration += finish-start
return duration
# Return a list of how many seconds each user has been engaging with the app on a given day
def seconds_active_during_day(day):
begin_day = day.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=utc)
end_day = day.replace(hour=23, minute=59, second=59, microsecond=0, tzinfo=utc)
active_users = active_users_to_measure()
return [seconds_usage_between(user, begin_day, end_day).total_seconds() for user in active_users]
def users_active_nosend_during_day(day):
begin_day = day.replace(hour=0, minute=0, second=0, microsecond=0, tzinfo=utc)
end_day = day.replace(hour=23, minute=59, second=59, microsecond=0, tzinfo=utc)
active_users = active_users_to_measure()
today_senders = users_who_sent_between(begin_day, end_day)
today_users = []
for user_profile in active_users:
intervals = UserActivityInterval.objects.filter(user_profile=user_profile,
end__gte=begin_day,
start__lte=end_day)
if len(intervals) != 0:
today_users.append(user_profile)
return [u for u in today_users if not u.id in today_senders]
def calculate_stats(data, all_users):
if len(data) == 0:
return {"# data points": 0}
active_user_count = len([x for x in data if x > 1])
mean_data = sum(data) / active_user_count
median_data = median([x for x in data if x > 1])
return {'active users': active_user_count,
'total users': len(all_users),
'mean': str(timedelta(seconds=mean_data)),
'median': str(timedelta(seconds=median_data)),
'# data points': len(data)}
# Return an info dict {mean: , median} containing the mean/median seconds users were active on a given day
def activity_averages_during_day(day):
users_to_measure = active_users_to_measure()
seconds_active = seconds_active_during_day(day)
return calculate_stats(seconds_active, all_users=users_to_measure)
# Returns an info dict {mean: , median} with engagement numbers for all users according
# to active_users_to_measure.
def activity_averages_between(begin, end, by_day=True):
seconds_active = {}
users_to_measure = active_users_to_measure()
for i in range((end - begin).days):
day = begin + timedelta(days=i)
# Ignore weekends
if day.weekday() in [5, 6]:
continue
seconds_active[day] = seconds_active_during_day(day)
if by_day:
return dict((day, calculate_stats(values, all_users=users_to_measure))
for day, values in seconds_active.iteritems())
else:
return calculate_stats(list(chain.from_iterable(seconds_active.values())),
all_users=users_to_measure)
| apache-2.0 |
nakul02/systemml | bin/utils.py | 15 | 3455 | #!/usr/bin/env python
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import sys
import os
from os.path import join, exists
from os import environ
import shutil
def get_env_systemml_home():
"""
Env variable error check and path location
return: String
Location of SYSTEMML_HOME
"""
systemml_home = os.environ.get('SYSTEMML_HOME')
if systemml_home is None:
print('SYSTEMML_HOME not found')
sys.exit()
return systemml_home
def get_env_spark_home():
"""
Env variable error check and path location
return: String
Location of SPARK_HOME
"""
spark_home = environ.get('SPARK_HOME')
if spark_home is None:
print('SPARK_HOME not found')
sys.exit()
return spark_home
def find_file(name, path):
"""
Responsible for finding a specific file recursively given a location
"""
for root, dirs, files in os.walk(path):
if name in files:
return join(root, name)
def find_dml_file(systemml_home, script_file):
"""
Find the location of the DML script being executed
return: String
Location of the dml script
"""
scripts_dir = join(systemml_home, 'scripts')
if not exists(script_file):
script_file_path = find_file(script_file, scripts_dir)
if script_file_path is not None:
return script_file_path
else:
print('Could not find DML script: ' + script_file)
sys.exit()
return script_file
def log4j_path(systemml_home):
"""
Create log4j.properties from the template if it does not exist
return: String
Location of log4j.properties path
"""
log4j_properties_path = join(systemml_home, 'conf', 'log4j.properties')
log4j_template_properties_path = join(systemml_home, 'conf', 'log4j.properties.template')
if not (exists(log4j_properties_path)):
shutil.copyfile(log4j_template_properties_path, log4j_properties_path)
print('... created ' + log4j_properties_path)
return log4j_properties_path
def config_path(systemml_home):
"""
Create SystemML-config.xml from the template if it does not exist
return: String
Location of SystemML-config.xml
"""
systemml_config_path = join(systemml_home, 'conf', 'SystemML-config.xml')
systemml_template_config_path = join(systemml_home, 'conf', 'SystemML-config.xml.template')
if not (exists(systemml_config_path)):
shutil.copyfile(systemml_template_config_path, systemml_config_path)
print('... created ' + systemml_config_path)
return systemml_config_path
| apache-2.0 |
EthanHeilman/bitcoin | test/functional/rpc_generate.py | 36 | 1188 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test generate RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
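# The deprecated "generate" RPC must raise an error pointing at the -generate
# CLI option and stay hidden from the general help output.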
class RPCGenerateTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
message = (
"generate\n"
"has been replaced by the -generate "
"cli option. Refer to -help for more information."
)
self.log.info("Test rpc generate raises with message to use cli option")
assert_raises_rpc_error(-32601, message, self.nodes[0].rpc.generate)
self.log.info("Test rpc generate help prints message to use cli option")
assert_equal(message, self.nodes[0].help("generate"))
self.log.info("Test rpc generate is a hidden command not discoverable in general help")
assert message not in self.nodes[0].help()
if __name__ == "__main__":
RPCGenerateTest().main()
| mit |
agileblaze/OpenStackTwoFactorAuthentication | openstack_dashboard/dashboards/admin/metering/tests.py | 24 | 7560 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.test.test_data import utils as test_utils
INDEX_URL = reverse('horizon:admin:metering:index')
CREATE_URL = reverse('horizon:admin:metering:create')
SAMPLES_URL = reverse('horizon:admin:metering:samples')
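# Form-level tests for the usage report creation view: a valid period redirects
# back to the index, while swapped or missing dates surface form errors.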
class MeteringViewTests(test.BaseAdminViewTests):
def test_create_report_page(self):
formData = {'period': 7}
res = self.client.get(CREATE_URL)
self.assertTemplateUsed(res, 'admin/metering/daily.html')
res = self.client.post(CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_create_report_dates_messed_up(self):
# dates are swapped in create report form
formData = {'period': 'other',
'date_to': '2014-01-01',
'date_from': '2014-02-02'}
res = self.client.post(CREATE_URL, formData)
self.assertFormError(res, "form", "date_to",
['Start must be earlier than end of period.'])
def test_create_report_date_missing(self):
formData = {'period': 'other',
'date_to': '2014-01-01',
'date_from': ''}
res = self.client.post(CREATE_URL, formData)
self.assertFormError(res, "form", "date_from",
['Must specify start of period'])
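# Chart-data tests: the samples endpoint should return one JSON series per
# project, or one per resource when no group_by parameter is requested.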
class MeteringLineChartTabTests(test.BaseAdminViewTests):
def setUp(self):
test.BaseAdminViewTests.setUp(self)
self.testdata = test_utils.TestData()
test_utils.load_test_data(self.testdata)
def _verify_series(self, series, value, date, expected_names):
data = json.loads(series)
self.assertTrue('series' in data)
self.assertEqual(len(data['series']), len(expected_names))
for d in data['series']:
self.assertTrue('data' in d)
self.assertEqual(len(d['data']), 1)
self.assertAlmostEqual(d['data'][0].get('y'), value)
self.assertEqual(d['data'][0].get('x'), date)
self.assertEqual(d.get('unit'), '')
self.assertIn(d.get('name'), expected_names)
expected_names.remove(d.get('name'))
self.assertEqual(data.get('settings'), {})
@test.create_stubs({api.keystone: ('tenant_list',),
api.ceilometer: ('meter_list',
'statistic_list',
), })
def test_stats_for_line_chart(self):
api.ceilometer.meter_list(IsA(http.HttpRequest))\
.AndReturn(self.testdata.meters.list())
api.ceilometer.statistic_list(IsA(http.HttpRequest),
'memory',
period=IsA(int),
query=IsA(list)).MultipleTimes()\
.AndReturn(self.testdata.statistics.list())
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=None,
paginate=False) \
.AndReturn([self.testdata.tenants.list(), False])
self.mox.ReplayAll()
# get all statistics of project aggregates
res = self.client.get(
reverse('horizon:admin:metering:samples') +
"?meter=memory&group_by=project&stats_attr=avg&date_options=7")
self.assertEqual(res._headers['content-type'],
('Content-Type', 'application/json'))
expected_names = ['test_tenant',
'disabled_tenant',
u'\u4e91\u89c4\u5219']
self._verify_series(res._container[0], 4.55, '2012-12-21T11:00:55',
expected_names)
@test.create_stubs({api.keystone: ('tenant_list',),
api.ceilometer: ('meter_list',
'statistic_list',
), })
def test_stats_for_line_chart_attr_max(self):
api.ceilometer.meter_list(IsA(http.HttpRequest))\
.AndReturn(self.testdata.meters.list())
api.ceilometer.statistic_list(IsA(http.HttpRequest),
'memory', period=IsA(int),
query=IsA(list))\
.MultipleTimes().AndReturn(self.testdata.statistics.list())
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=None,
paginate=False) \
.AndReturn([self.testdata.tenants.list(), False])
self.mox.ReplayAll()
# get all statistics of project aggregates
res = self.client.get(
reverse('horizon:admin:metering:samples') +
"?meter=memory&group_by=project&stats_attr=max&date_options=7")
self.assertEqual(res._headers['content-type'],
('Content-Type', 'application/json'))
expected_names = ['test_tenant',
'disabled_tenant',
u'\u4e91\u89c4\u5219']
self._verify_series(res._container[0], 9.0, '2012-12-21T11:00:55',
expected_names)
@test.create_stubs({api.keystone: ('tenant_list',),
api.ceilometer: ('meter_list',
'resource_list',
'statistic_list'
), })
def test_stats_for_line_chart_no_group(self):
api.ceilometer.meter_list(IsA(http.HttpRequest))\
.AndReturn(self.testdata.meters.list())
api.ceilometer.resource_list(IsA(http.HttpRequest), query=None,
ceilometer_usage_object=None)\
.AndReturn(self.testdata.api_resources.list())
api.ceilometer.statistic_list(IsA(http.HttpRequest),
'memory', period=IsA(int),
query=IsA(list))\
.MultipleTimes().AndReturn(self.testdata.statistics.list())
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=None,
paginate=False) \
.AndReturn([self.testdata.tenants.list(), False])
self.mox.ReplayAll()
# get all statistics of the meter
res = self.client.get(
reverse('horizon:admin:metering:samples') +
"?meter=memory&stats_attr=max&date_options=7")
self.assertEqual(res._headers['content-type'],
('Content-Type', 'application/json'))
expected_names = ['fake_resource_id3']
self._verify_series(res._container[0], 9.0, '2012-12-21T11:00:55',
expected_names)
| apache-2.0 |
heimdali/ITK | Modules/ThirdParty/GDCM/src/gdcm/Wrapping/Python/TestWrap.py | 17 | 2817 | #!/usr/bin/env python
############################################################################
#
# Program: GDCM (Grassroots DICOM). A DICOM library
# Module: $URL$
#
# Copyright (c) 2006-2010 Mathieu Malaterre
# All rights reserved.
# See Copyright.txt or http://gdcm.sourceforge.net/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
############################################################################
# Loop over all .h file, extract the name since by convention this is the name
# of the class, and then try to load that name in the python shell
import sys,os,stat
import gdcm
blacklist = (
"_j2k", # :)
"_jp2", # :)
"treamimpl", # :)
"TestDriver",
# DataStructureAndEncodingDefinition
"ByteBuffer", # WTF ?
"ExplicitDataElement",
"CP246ExplicitDataElement",
"ImplicitDataElement",
"Element",
"ValueIO",
"ParseException",
"ByteSwapFilter",
"ExplicitImplicitDataElement",
"UNExplicitDataElement",
"UNExplicitImplicitDataElement",
"Attribute",
"VR16ExplicitDataElement",
"LO", # issue with swig
"String",
"CodeString",
"Parser",
# DataDict:
"TagToType",
"GroupDict",
"DictConverter",
# Information thingy :
"MacroEntry",
"XMLDictReader",
"TableReader",
"Table",
"XMLPrivateDictReader",
# Common
"LegacyMacro",
"Swapper",
"SmartPointer",
"Win32",
"StaticAssert",
"DeflateStream",
"Types",
"Exception",
"ByteSwap",
"Terminal",
# MediaStorageAndFileFormat
"ConstCharWrapper",
"ImageConverter",
"SerieHelper",
# Do not expose low level jpeg implementation detail
"JPEG8Codec",
"JPEG12Codec",
"JPEG16Codec",
"JPEG2000Codec",
# For now remove the codec part:
"ImageCodec",
"DeltaEncodingCodec",
"RLECodec",
"RAWCodec",
"AudioCodec",
"EncapsulatedDocument",
"JPEGCodec",
"PVRGCodec",
"KAKADUCodec",
"JPEGLSCodec",
"PNMCodec",
"PDFCodec",
"Decoder",
"Coder",
"ImageChangePhotometricInterpretation",
"IconImage" # FIXME
)
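# Compare every gdcm*.h header in a directory against the classes exposed by the
# gdcm Python module and report those that are neither wrapped nor blacklisted;
# the return value is the number of unwrapped classes found.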
def processonedir(dirname):
gdcmclasses = dir(gdcm)
subtotal = 0
for file in os.listdir(dirname):
#print file[-2:]
if file[-2:] != '.h': continue
#print file[4:-2]
gdcmclass = file[4:-2]
if gdcmclass in gdcmclasses:
print "ok:", gdcmclass
else:
if not gdcmclass in blacklist:
print "not wrapped:",gdcmclass
subtotal += 1
return subtotal
if __name__ == "__main__":
dirname = os.sys.argv[1]
total = 0
for d in os.listdir(dirname):
if d == '.svn': continue
pathname = os.path.join(dirname, d)
#print "pathname:",pathname
#print os.stat(pathname)
mode = os.stat(pathname)[stat.ST_MODE]
if stat.S_ISDIR(mode):
print "processing directory:", pathname
total += processonedir(pathname)
print "number of classes not wrapped:%d"%total
sys.exit(total)
| apache-2.0 |
Distrotech/antlr4 | runtime/Python3/src/antlr4/tree/RuleTagToken.py | 17 | 3322 | #
# [The "BSD license"]
# Copyright (c) 2013 Terence Parr
# Copyright (c) 2013 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# A {@link Token} object representing an entire subtree matched by a parser
# rule; e.g., {@code <expr>}. These tokens are created for {@link TagChunk}
# chunks where the tag corresponds to a parser rule.
#
from antlr4.Token import Token
class RuleTagToken(Token):
#
# Constructs a new instance of {@link RuleTagToken} with the specified rule
# name, bypass token type, and label.
#
# @param ruleName The name of the parser rule this rule tag matches.
# @param bypassTokenType The bypass token type assigned to the parser rule.
# @param label The label associated with the rule tag, or {@code null} if
# the rule tag is unlabeled.
#
# @exception IllegalArgumentException if {@code ruleName} is {@code null}
# or empty.
def __init__(self, ruleName:str, bypassTokenType:int, label:str=None):
if ruleName is None or len(ruleName)==0:
raise Exception("ruleName cannot be null or empty.")
self.source = None
self.type = bypassTokenType # token type of the token
self.channel = Token.DEFAULT_CHANNEL # The parser ignores everything not on DEFAULT_CHANNEL
self.start = -1 # optional; return -1 if not implemented.
self.stop = -1 # optional; return -1 if not implemented.
self.tokenIndex = -1 # from 0..n-1 of the token object in the input stream
self.line = 0 # line=1..n of the 1st character
self.column = -1 # beginning of the line at which it occurs, 0..n-1
self.label = label
self._text = self.getText() # text of the token.
self.ruleName = ruleName
def getText(self):
if self.label is None:
return "<" + self.ruleName + ">"
else:
return "<" + self.label + ":" + self.ruleName + ">"
| bsd-3-clause |
afrolov1/nova | nova/api/manager.py | 119 | 1216 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import manager
from nova.network import driver
class MetadataManager(manager.Manager):
"""Metadata Manager.
This class manages the Metadata API service initialization. Currently, it
just adds an iptables filter rule for the metadata service.
"""
def __init__(self, *args, **kwargs):
super(MetadataManager, self).__init__(*args, **kwargs)
self.network_driver = driver.load_network_driver()
self.network_driver.metadata_accept()
| apache-2.0 |
adambrenecki/django | django/contrib/formtools/tests/wizard/namedwizardtests/tests.py | 127 | 15125 | from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.http import QueryDict
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.formtools.wizard.views import (NamedUrlSessionWizardView,
NamedUrlCookieWizardView)
from django.contrib.formtools.tests.wizard.test_forms import get_request, Step1, Step2
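# Mixin holding the actual wizard scenarios; it is exercised against both
# storage backends via the session and cookie TestCase subclasses below.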
class NamedWizardTests(object):
urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'
def setUp(self):
self.testuser, created = User.objects.get_or_create(username='testuser1')
self.wizard_step_data[0]['form1-user'] = self.testuser.pk
def test_initial_call(self):
response = self.client.get(reverse('%s_start' % self.wizard_urlname))
self.assertEqual(response.status_code, 302)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
wizard = response.context['wizard']
self.assertEqual(wizard['steps'].current, 'form1')
self.assertEqual(wizard['steps'].step0, 0)
self.assertEqual(wizard['steps'].step1, 1)
self.assertEqual(wizard['steps'].last, 'form4')
self.assertEqual(wizard['steps'].prev, None)
self.assertEqual(wizard['steps'].next, 'form2')
self.assertEqual(wizard['steps'].count, 4)
self.assertEqual(wizard['url_name'], self.wizard_urlname)
def test_initial_call_with_params(self):
get_params = {'getvar1': 'getval1', 'getvar2': 'getval2'}
response = self.client.get(reverse('%s_start' % self.wizard_urlname),
get_params)
self.assertEqual(response.status_code, 302)
# Test for proper redirect GET parameters
location = response.url
self.assertNotEqual(location.find('?'), -1)
querydict = QueryDict(location[location.find('?') + 1:])
self.assertEqual(dict(querydict.items()), get_params)
def test_form_post_error(self):
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_1_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
self.assertEqual(response.context['wizard']['form'].errors,
{'name': ['This field is required.'],
'user': ['This field is required.']})
def test_form_post_success(self):
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
wizard = response.context['wizard']
self.assertEqual(wizard['steps'].current, 'form2')
self.assertEqual(wizard['steps'].step0, 1)
self.assertEqual(wizard['steps'].prev, 'form1')
self.assertEqual(wizard['steps'].next, 'form3')
def test_form_stepback(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.post(
reverse(self.wizard_urlname, kwargs={
'step': response.context['wizard']['steps'].current
}), {'wizard_goto_step': response.context['wizard']['steps'].prev})
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_jump(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form3'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
def test_form_finish(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
post_data = self.wizard_step_data[1]
post_data['form2-file1'].close()
post_data['form2-file1'] = open(__file__, 'rb')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
post_data)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form3')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[2])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form4')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[3])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
all_data = response.context['form_list']
with open(__file__, 'rb') as f:
self.assertEqual(all_data[1]['file1'].read(), f.read())
all_data[1]['file1'].close()
del all_data[1]['file1']
self.assertEqual(all_data, [
{'name': 'Pony', 'thirsty': True, 'user': self.testuser},
{'address1': '123 Main St', 'address2': 'Djangoland'},
{'random_crap': 'blah blah'},
[{'random_crap': 'blah blah'}, {'random_crap': 'blah blah'}]])
def test_cleaned_data(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'] = open(__file__, 'rb')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
post_data)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
step2_url = reverse(self.wizard_urlname, kwargs={'step': 'form2'})
response = self.client.get(step2_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
with open(__file__, 'rb') as f:
self.assertEqual(
response.context['wizard']['form'].files['form2-file1'].read(),
f.read())
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[2])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[3])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
all_data = response.context['all_cleaned_data']
with open(__file__, 'rb') as f:
self.assertEqual(all_data['file1'].read(), f.read())
all_data['file1'].close()
del all_data['file1']
self.assertEqual(
all_data,
{'name': 'Pony', 'thirsty': True, 'user': self.testuser,
'address1': '123 Main St', 'address2': 'Djangoland',
'random_crap': 'blah blah', 'formset-form4': [
{'random_crap': 'blah blah'},
{'random_crap': 'blah blah'}
]})
def test_manipulated_data(self):
response = self.client.get(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
post_data = self.wizard_step_data[1]
post_data['form2-file1'].close()
post_data['form2-file1'] = open(__file__, 'rb')
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
post_data)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[2])
loc = response.url
response = self.client.get(loc)
self.assertEqual(response.status_code, 200, loc)
self.client.cookies.pop('sessionid', None)
self.client.cookies.pop('wizard_cookie_contact_wizard', None)
response = self.client.post(
reverse(self.wizard_urlname,
kwargs={'step': response.context['wizard']['steps'].current}),
self.wizard_step_data[3])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
def test_form_reset(self):
response = self.client.post(
reverse(self.wizard_urlname, kwargs={'step': 'form1'}),
self.wizard_step_data[0])
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form2')
response = self.client.get(
'%s?reset=1' % reverse('%s_start' % self.wizard_urlname))
self.assertEqual(response.status_code, 302)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['wizard']['steps'].current, 'form1')
@skipIfCustomUser
class NamedSessionWizardTests(NamedWizardTests, TestCase):
wizard_urlname = 'nwiz_session'
wizard_step_1_data = {
'session_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'session_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'session_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form4',
}
)
@skipIfCustomUser
class NamedCookieWizardTests(NamedWizardTests, TestCase):
wizard_urlname = 'nwiz_cookie'
wizard_step_1_data = {
'cookie_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'cookie_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'cookie_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'cookie_contact_wizard-current_step': 'form4',
}
)
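# Revalidation tests: requesting the "done" step directly must re-render and
# put the wizard back on its first step.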
class NamedFormTests(object):
urls = 'django.contrib.formtools.tests.wizard.namedwizardtests.urls'
def test_revalidation(self):
request = get_request()
testform = self.formwizard_class.as_view(
[('start', Step1), ('step2', Step2)],
url_name=self.wizard_urlname)
response, instance = testform(request, step='done')
instance.render_done(None)
self.assertEqual(instance.storage.current_step, 'start')
class TestNamedUrlSessionWizardView(NamedUrlSessionWizardView):
def dispatch(self, request, *args, **kwargs):
response = super(TestNamedUrlSessionWizardView, self).dispatch(request, *args, **kwargs)
return response, self
class TestNamedUrlCookieWizardView(NamedUrlCookieWizardView):
def dispatch(self, request, *args, **kwargs):
response = super(TestNamedUrlCookieWizardView, self).dispatch(request, *args, **kwargs)
return response, self
@skipIfCustomUser
class NamedSessionFormTests(NamedFormTests, TestCase):
formwizard_class = TestNamedUrlSessionWizardView
wizard_urlname = 'nwiz_session'
@skipIfCustomUser
class NamedCookieFormTests(NamedFormTests, TestCase):
formwizard_class = TestNamedUrlCookieWizardView
wizard_urlname = 'nwiz_cookie'
| bsd-3-clause |
jgresula/jagpdf | code/tools/external/python/pygccxml/declarations/pattern_parser.py | 1 | 4743 | # Copyright 2004 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""implementation details"""
import types
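# Splits template instantiation strings such as "vector< int, allocator< int > >"
# into a name and a list of argument strings, using the begin/end/separator
# characters supplied at construction time.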
class parser_t( object ):
"""implementation details"""
def __init__( self
, pattern_char_begin
, pattern_char_end
, pattern_char_separator ):
self.__begin = pattern_char_begin
self.__end = pattern_char_end
self.__separator = pattern_char_separator
#right now the parser does not take into account text qualifiers, but it will
self.__text_qualifier = '"'
self.__char_qualifier = "'"
self.__escape = '\\'
def has_pattern( self, decl_string ):
"""implementation details"""
last_part = decl_string.split( '::' )[-1]
return -1 != last_part.find( self.__end )
def name( self, decl_string ):
"""implementation details"""
assert isinstance( decl_string, types.StringTypes )
if not self.has_pattern( decl_string ):
return decl_string
args_begin = decl_string.find( self.__begin )
return decl_string[0: args_begin].strip()
def __find_args_separator( self, decl_string, start_pos ):
"""implementation details"""
bracket_depth = 0
for index, ch in enumerate( decl_string[start_pos:] ):
if ch not in ( self.__begin, self.__end, self.__separator ):
continue #I am interested only in < and >
elif self.__separator == ch:
if not bracket_depth:
return index + start_pos
elif self.__begin == ch:
bracket_depth += 1
elif not bracket_depth:
return index + start_pos
else:
bracket_depth -= 1
return -1
def args( self, decl_string ):
"""implementation details"""
args_begin = decl_string.find( self.__begin )
args_end = decl_string.rfind( self.__end )
if -1 in ( args_begin, args_end ) or args_begin == args_end:
raise RuntimeError( "%s is not a valid template instantiation string" % decl_string )
args_only = decl_string[args_begin + 1: args_end ]
args = []
previous_found, found = 0, 0
while True:
found = self.__find_args_separator( args_only, previous_found)
if -1 == found:
args.append( args_only[ previous_found : ] )
break
#elif decl_string[ found ] == self.__end:
# print args
# raise RuntimeError( "unmatched '%s' token has been found." % self.__end )
else:
args.append( args_only[ previous_found : found ] )
previous_found = found + 1 #skip found sep
return [ arg.strip() for arg in args ]
NOT_FOUND = ( -1, -1 )
"""implementation details"""
def find_args(self, text, start=None ):
"""implementation details"""
if start is None:
start = 0
first_occurance = text.find( self.__begin, start )
if first_occurance == -1:
return self.NOT_FOUND
previous_found, found = first_occurance + 1, 0
while True:
found = self.__find_args_separator( text, previous_found)
if -1 == found:
return self.NOT_FOUND
elif text[ found ] == self.__end:
return ( first_occurance, found )
else:
previous_found = found + 1 #skip found sep
def split( self, decl_string ):
"""implementation details"""
assert self.has_pattern( decl_string )
return self.name( decl_string ), self.args( decl_string )
def split_recursive( self, decl_string ):
"""implementation details"""
assert self.has_pattern( decl_string )
answer = []
to_go = [ decl_string ]
while to_go:
name, args = self.split( to_go.pop() )
answer.append( ( name, args ) )
for arg in args:
if self.has_pattern( arg ):
to_go.append( arg )
return answer
def join( self, name, args, arg_separator=None ):
"""implementation details"""
if None is arg_separator:
arg_separator = ', '
args = filter( None, args)
args_str = ''
if not args:
args_str = ' '
elif 1 == len( args ):
args_str = ' ' + args[0] + ' '
else:
args_str = ' ' + arg_separator.join( args ) + ' '
return ''.join( [ name, self.__begin, args_str, self.__end ] )
| mit |
pacificIT/freevo2 | src/plugins/shutdown.py | 1 | 4745 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# shutdown.py - shutdown plugin
# -----------------------------------------------------------------------------
# $Id$
#
# This plugin creates the shutdown main menu item
#
# First edition: Dirk Meyer <[email protected]>
# Maintainer: Dirk Meyer <[email protected]>
#
# -----------------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002-2005 Krister Lagerstrom, Dirk Meyer, et al.
# Please see the file doc/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
# python imports
import sys
import os
# kaa imports
import kaa
# freevo imports
from .. import core as freevo
from ..core.system import manager as system
# get shutdown config
config = freevo.config.plugin.shutdown
class ShutdownItem(freevo.MainMenuItem):
"""
Item for shutdown
"""
def actions(self):
"""
return a list of actions for this item
"""
def choose_cb(confirm, noconfirm):
if config.confirm:
return confirm
return noconfirm
items = [ freevo.Action(_('Shutdown Freevo'), choose_cb(self.confirm_freevo, system.exit)),
freevo.Action(_('Restart Freevo'), choose_cb(self.confirm_freevo_restart, system.restart))]
if system.can_suspend():
items.append(freevo.Action(_('Suspend system'), choose_cb(self.confirm_suspend, system.suspend)))
if system.can_hibernate():
items.append(freevo.Action(_('Hibernate system'), choose_cb(self.confirm_hibernate, system.hibernate)))
if system.can_shutdown():
items.append(freevo.Action(_('Shutdown system'), choose_cb(self.confirm_system, system.shutdown)))
if system.can_reboot():
items.append(freevo.Action(_('Restart system'), choose_cb(self.confirm_sys_restart, system.reboot)))
if config.default == 'system':
items = items[2:] + items[:2]
return items
def confirm_freevo(self):
"""
Pops up a ConfirmWindow.
"""
what = _('Do you really want to shut down Freevo?')
box = freevo.ConfirmWindow(what, default_choice=1)
box.buttons[0].connect(system.exit)
box.show()
def confirm_system(self):
"""
Pops up a ConfirmWindow.
"""
what = _('Do you really want to shut down the system?')
box = freevo.ConfirmWindow(what, default_choice=1)
box.buttons[0].connect(system.shutdown)
box.show()
def confirm_freevo_restart(self):
"""
Pops up a ConfirmWindow.
"""
what = _('Do you really want to restart Freevo?')
box = freevo.ConfirmWindow(what, default_choice=1)
box.buttons[0].connect(system.restart)
box.show()
def confirm_sys_restart(self):
"""
Pops up a ConfirmWindow.
"""
what = _('Do you really want to restart the system?')
box = freevo.ConfirmWindow(what, default_choice=1)
box.buttons[0].connect(system.reboot)
box.show()
def confirm_suspend(self):
"""
Pops up a ConfirmWindow.
"""
what = _('Do you really want to suspend the system?')
box = freevo.ConfirmWindow(what, default_choice=1)
box.buttons[0].connect(system.suspend)
box.show()
def confirm_hibernate(self):
"""
Pops up a ConfirmWindow.
"""
what = _('Do you really want to hibernate the system?')
box = freevo.ConfirmWindow(what, default_choice=1)
box.buttons[0].connect(system.hibernate)
box.show()
#
# the plugin is defined here
#
class PluginInterface(freevo.MainMenuPlugin):
"""
Plugin to shutdown Freevo from the main menu
"""
def items(self, parent):
return [ ShutdownItem(parent, _('Shutdown')) ]
| gpl-2.0 |
mikaelboman/home-assistant | homeassistant/components/lock/mqtt.py | 8 | 4282 | """
Support for MQTT locks.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/lock.mqtt/
"""
import logging
import voluptuous as vol
import homeassistant.components.mqtt as mqtt
from homeassistant.components.lock import LockDevice
from homeassistant.const import CONF_NAME, CONF_OPTIMISTIC, CONF_VALUE_TEMPLATE
from homeassistant.components.mqtt import (
CONF_STATE_TOPIC, CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN)
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
CONF_PAYLOAD_LOCK = 'payload_lock'
CONF_PAYLOAD_UNLOCK = 'payload_unlock'
DEFAULT_NAME = "MQTT Lock"
DEFAULT_PAYLOAD_LOCK = "LOCK"
DEFAULT_PAYLOAD_UNLOCK = "UNLOCK"
DEFAULT_OPTIMISTIC = False
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_LOCK, default=DEFAULT_PAYLOAD_LOCK):
cv.string,
vol.Optional(CONF_PAYLOAD_UNLOCK, default=DEFAULT_PAYLOAD_UNLOCK):
cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the MQTT lock."""
add_devices_callback([MqttLock(
hass,
config[CONF_NAME],
config.get(CONF_STATE_TOPIC),
config[CONF_COMMAND_TOPIC],
config[CONF_QOS],
config[CONF_RETAIN],
config[CONF_PAYLOAD_LOCK],
config[CONF_PAYLOAD_UNLOCK],
config[CONF_OPTIMISTIC],
config.get(CONF_VALUE_TEMPLATE))])
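# The lock mirrors the state topic when one is configured; otherwise it runs in
# optimistic mode and assumes every published command succeeds.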
# pylint: disable=too-many-arguments, too-many-instance-attributes
class MqttLock(LockDevice):
"""Represents a lock that can be toggled using MQTT."""
def __init__(self, hass, name, state_topic, command_topic, qos, retain,
payload_lock, payload_unlock, optimistic, value_template):
"""Initialize the lock."""
self._state = False
self._hass = hass
self._name = name
self._state_topic = state_topic
self._command_topic = command_topic
self._qos = qos
self._retain = retain
self._payload_lock = payload_lock
self._payload_unlock = payload_unlock
self._optimistic = optimistic
def message_received(topic, payload, qos):
"""A new MQTT message has been received."""
if value_template is not None:
payload = template.render_with_possible_json_value(
hass, value_template, payload)
if payload == self._payload_lock:
self._state = True
self.update_ha_state()
elif payload == self._payload_unlock:
self._state = False
self.update_ha_state()
if self._state_topic is None:
# Force into optimistic mode.
self._optimistic = True
else:
mqtt.subscribe(hass, self._state_topic, message_received,
self._qos)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""The name of the lock."""
return self._name
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
def lock(self, **kwargs):
"""Lock the device."""
mqtt.publish(self.hass, self._command_topic, self._payload_lock,
self._qos, self._retain)
if self._optimistic:
# Optimistically assume that the lock has changed state.
self._state = True
self.update_ha_state()
def unlock(self, **kwargs):
"""Unlock the device."""
mqtt.publish(self.hass, self._command_topic, self._payload_unlock,
self._qos, self._retain)
if self._optimistic:
# Optimistically assume that the lock has changed state.
self._state = False
self.update_ha_state()
| mit |
huguesmayolle/famille | famille/migrations/0032_auto__add_field_prestataire_restrictions.py | 1 | 22005 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
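# Auto-generated South schema migration: adds the nullable Prestataire.restrictions
# CharField, removed again by backwards().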
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Prestataire.restrictions'
db.add_column(u'famille_prestataire', 'restrictions',
self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Prestataire.restrictions'
db.delete_column(u'famille_prestataire', 'restrictions')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'famille.enfant': {
'Meta': {'object_name': 'Enfant'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'e_birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'birthday'", 'blank': 'True'}),
'e_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_column': "'name'"}),
'e_school': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_column': "'school'", 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enfants'", 'to': "orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.famille': {
'Meta': {'object_name': 'Famille'},
'baby': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cdt_periscolaire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diploma': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'geolocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['famille.Geolocation']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'langue': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nuit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.CharField', [], {'default': "'basic'", 'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'profile_pic': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pseudo': ('django.db.models.fields.CharField', [], {'max_length': '60', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sortie_ecole': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'tel_visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'type_presta': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'urgence': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'visibility_family': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visibility_global': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'visibility_prestataire': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'famille.famillefavorite': {
'Meta': {'object_name': 'FamilleFavorite'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favorites'", 'to': "orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.familleplanning': {
'Meta': {'object_name': 'FamillePlanning'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'planning'", 'to': "orm['famille.Famille']"}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Schedule']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'weekday': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Weekday']", 'symmetrical': 'False'})
},
'famille.familleratings': {
'Meta': {'object_name': 'FamilleRatings'},
'amability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'by': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ponctuality': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'reliability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'serious': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['famille.Famille']"})
},
'famille.geolocation': {
'Meta': {'object_name': 'Geolocation'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'has_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.prestataire': {
'Meta': {'object_name': 'Prestataire'},
'baby': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cdt_periscolaire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diploma': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'geolocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['famille.Geolocation']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_de': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_en': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_es': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_it': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nuit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'other_language': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'other_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.CharField', [], {'default': "'basic'", 'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'profile_pic': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'restrictions': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'resume': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'sortie_ecole': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'tel_visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'urgence': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
'famille.prestatairefavorite': {
'Meta': {'object_name': 'PrestataireFavorite'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favorites'", 'to': "orm['famille.Prestataire']"}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.prestataireplanning': {
'Meta': {'object_name': 'PrestatairePlanning'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'planning'", 'to': "orm['famille.Prestataire']"}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Schedule']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'weekday': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Weekday']", 'symmetrical': 'False'})
},
'famille.prestataireratings': {
'Meta': {'object_name': 'PrestataireRatings'},
'amability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'by': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ponctuality': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'reliability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'serious': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['famille.Prestataire']"})
},
'famille.reference': {
'Meta': {'object_name': 'Reference'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'references'", 'to': "orm['famille.Prestataire']"}),
'referenced_user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'reference_of'", 'unique': 'True', 'null': 'True', 'to': "orm['famille.Famille']"}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.schedule': {
'Meta': {'object_name': 'Schedule'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'famille.weekday': {
'Meta': {'object_name': 'Weekday'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '15'})
}
}
complete_apps = ['famille'] | apache-2.0 |
ychen820/microblog | y/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/images/deprecate.py | 4 | 7668 | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for deprecating images."""
import datetime
from googlecloudapis.compute.v1 import compute_v1_messages
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.compute.lib import base_classes
DEPRECATION_STATUSES = sorted(
['ACTIVE'] +
compute_v1_messages.DeprecationStatus.StateValueValuesEnum.to_dict().keys())
def _ResolveTime(absolute, relative_sec, current_time):
"""Get the RFC 3339 time string for a provided absolute or relative time."""
if absolute:
return absolute
elif relative_sec:
return (
current_time + datetime.timedelta(seconds=relative_sec)
).replace(microsecond=0).isoformat()
else:
return None
class DeprecateImages(base_classes.NoOutputAsyncMutator):
"""Deprecate Google Compute Engine images."""
@staticmethod
def Args(parser):
parser.add_argument(
'name',
metavar='NAME',
help='The name of the image to set deprecation status of.')
state = parser.add_argument(
'--state',
choices=DEPRECATION_STATUSES,
type=lambda x: x.upper(),
required=True,
help='The deprecation state to set on the image.')
state.detailed_help = """\
The deprecation state to set on the image.
An image's default state is ``ACTIVE'', suggesting that the image is
currently supported. Operations which create a new
resource using a ``DEPRECATED'' image
return successfully, but with a warning indicating that the image
is deprecated and recommending its replacement. New uses of ``OBSOLETE'' or
``DELETED'' images result in an error. Note that setting the
deprecation state to ``DELETED'' will not automatically delete the
image. You must still make a request to delete the image to remove it
from the image list.
"""
replacement = parser.add_argument(
'--replacement',
help='Specifies a Compute Engine image as a replacement.')
replacement.detailed_help = """\
Specifies a Compute Engine image as a replacement for the image
being phased out. Users of the deprecated image will be advised to switch
to this replacement. For example, ``--replacement example-image'' or
``--replacement projects/google/global/images/example-image''. This
flag is required when setting the image state to anything other than
``ACTIVE'' or when --delete-in, --delete-on, --obsolete-in, or
--obsolete-on is provided.
"""
delete_group = parser.add_mutually_exclusive_group()
delete_on = delete_group.add_argument(
'--delete-on',
help=('Specifies the date and time when the state of this image '
'will become DELETED.'))
delete_on.detailed_help = """\
Similar to --delete-in, but specifies an absolute time when the status
should be set to DELETED. The date and time
specified must be a valid RFC 3339 full-date or date-time.
For times in UTC, this looks like ``YYYY-MM-DDTHH:MM:SSZ''. For example:
2020-01-02T00:00:00Z for midnight on January 2, 2020 in UTC.
This flag is mutually exclusive with --delete-in.
"""
delete_in = delete_group.add_argument(
'--delete-in',
help=('Specifies the amount of time until this image should become '
'DELETED.'),
type=arg_parsers.Duration())
delete_in.detailed_help = """\
Specifies the amount of time until the image's status should be set
to DELETED. For instance, specifying ``30d'' will set the status to
DELETED in 30 days from the current system time. Valid units for this
flag are ``s'' for seconds, ``m'' for minutes, ``h'' for hours and
``d'' for days. If no unit is specified, seconds is assumed.
Note that the image will not be deleted automatically. The image will
only be marked as deleted. An explicit request to delete the image must
be made in order to remove it from the image list.
This flag is mutually exclusive with --delete-on.
"""
obsolete_group = parser.add_mutually_exclusive_group()
obsolete_on = obsolete_group.add_argument(
'--obsolete-on',
help=('Specifies the date and time when the state of this image '
'will become OBSOLETE.'))
obsolete_on.detailed_help = """\
Specifies time (in the same format as --delete-on) when this image's
status should become OBSOLETE.
This flag is mutually exclusive with --obsolete-in.
"""
obsolete_in = obsolete_group.add_argument(
'--obsolete-in',
help=('Specifies the amount of time until this image should become '
'OBSOLETE.'),
type=arg_parsers.Duration())
obsolete_in.detailed_help = """\
Specifies time (in the same format as --delete-in) until this image's
status should become OBSOLETE. Obsolete images will cause an error
whenever an attempt is made to apply the image to a new disk.
This flag is mutually exclusive with --obsolete-on.
"""
@property
def service(self):
return self.compute.images
@property
def method(self):
return 'Deprecate'
@property
def resource_type(self):
return 'images'
def CreateRequests(self, args):
"""Returns a list of requests necessary for deprecating images."""
if (any([args.delete_on, args.delete_in, args.obsolete_on, args.obsolete_in,
args.replacement]) and args.state == 'ACTIVE'):
raise calliope_exceptions.ToolException(
'If the state is set to [ACTIVE] then none of [--delete-on], '
'[--delete-in], [--obsolete-on], [--obsolete-in], or [--replacement] '
'may be provided.')
elif not args.replacement and args.state != 'ACTIVE':
raise calliope_exceptions.ToolException(
'[--replacement] is required when deprecation, obsoletion or '
'deletion status is set or scheduled.')
# Determine the date and time to deprecate for each flag set.
current_time = datetime.datetime.now()
delete_time = _ResolveTime(args.delete_on, args.delete_in, current_time)
obsolete_time = _ResolveTime(
args.obsolete_on, args.obsolete_in, current_time)
# ACTIVE is not actually an option in the state enum.
if args.state == 'ACTIVE':
state = None
else:
state = self.messages.DeprecationStatus.StateValueValuesEnum(args.state)
if args.replacement:
replacement_uri = self.CreateGlobalReference(args.replacement).SelfLink()
else:
replacement_uri = None
image_ref = self.CreateGlobalReference(args.name)
request = self.messages.ComputeImagesDeprecateRequest(
deprecationStatus=self.messages.DeprecationStatus(
state=state,
deleted=delete_time,
obsolete=obsolete_time,
replacement=replacement_uri),
image=image_ref.Name(),
project=self.project)
return [request]
DeprecateImages.detailed_help = {
'brief': 'Manage deprecation status of Google Compute Engine images',
'DESCRIPTION': """\
*{command}* is used to deprecate images.
""",
'EXAMPLES': """\
To deprecate an image called 'IMAGE' immediately, mark it as
obsolete in one day, and mark it as deleted in two days, use:
          $ {command} IMAGE --state DEPRECATED --obsolete-in 1d --delete-in 2d
To un-deprecate an image called 'IMAGE', use:
          $ {command} IMAGE --state ACTIVE
""",
}
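# Editor's note: illustrative sketch appended by the editor, not part of the
# original module; the sample timestamps below are assumptions chosen only to
# show how _ResolveTime combines --delete-in / --delete-on style flag values.
if __name__ == '__main__':
  _now = datetime.datetime(2014, 1, 1, 12, 0, 0)
  # A relative offset (e.g. --delete-in 30d == 2592000 seconds) is added to the
  # current time and rendered as an RFC 3339 date-time without microseconds.
  assert _ResolveTime(None, 2592000, _now) == '2014-01-31T12:00:00'
  # An absolute time (e.g. --delete-on 2020-01-02T00:00:00Z) passes through unchanged.
  assert _ResolveTime('2020-01-02T00:00:00Z', None, _now) == '2020-01-02T00:00:00Z'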
| bsd-3-clause |
jonathan-beard/edx-platform | lms/djangoapps/ccx/models.py | 46 | 3629 | """
Models for the custom course feature
"""
from datetime import datetime
import logging
from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import UTC
from lazy import lazy
from xmodule_django.models import CourseKeyField, LocationKeyField # pylint: disable=import-error
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.ccx")
class CustomCourseForEdX(models.Model):
"""
A Custom Course.
"""
course_id = CourseKeyField(max_length=255, db_index=True)
display_name = models.CharField(max_length=255)
coach = models.ForeignKey(User, db_index=True)
@lazy
def course(self):
"""Return the CourseDescriptor of the course related to this CCX"""
store = modulestore()
with store.bulk_operations(self.course_id):
course = store.get_course(self.course_id)
if not course or isinstance(course, ErrorDescriptor):
log.error("CCX {0} from {2} course {1}".format( # pylint: disable=logging-format-interpolation
self.display_name, self.course_id, "broken" if course else "non-existent"
))
return course
@lazy
def start(self):
"""Get the value of the override of the 'start' datetime for this CCX
"""
# avoid circular import problems
from .overrides import get_override_for_ccx
return get_override_for_ccx(self, self.course, 'start')
@lazy
def due(self):
"""Get the value of the override of the 'due' datetime for this CCX
"""
# avoid circular import problems
from .overrides import get_override_for_ccx
return get_override_for_ccx(self, self.course, 'due')
def has_started(self):
"""Return True if the CCX start date is in the past"""
return datetime.now(UTC()) > self.start
def has_ended(self):
"""Return True if the CCX due date is set and is in the past"""
if self.due is None:
return False
return datetime.now(UTC()) > self.due
def start_datetime_text(self, format_string="SHORT_DATE"):
"""Returns the desired text representation of the CCX start datetime
The returned value is always expressed in UTC
"""
i18n = self.course.runtime.service(self.course, "i18n")
strftime = i18n.strftime
value = strftime(self.start, format_string)
if format_string == 'DATE_TIME':
value += u' UTC'
return value
def end_datetime_text(self, format_string="SHORT_DATE"):
"""Returns the desired text representation of the CCX due datetime
If the due date for the CCX is not set, the value returned is the empty
string.
The returned value is always expressed in UTC
"""
if self.due is None:
return ''
i18n = self.course.runtime.service(self.course, "i18n")
strftime = i18n.strftime
value = strftime(self.due, format_string)
if format_string == 'DATE_TIME':
value += u' UTC'
return value
class CcxFieldOverride(models.Model):
"""
Field overrides for custom courses.
"""
ccx = models.ForeignKey(CustomCourseForEdX, db_index=True)
location = LocationKeyField(max_length=255, db_index=True)
field = models.CharField(max_length=255)
class Meta(object): # pylint: disable=missing-docstring
unique_together = (('ccx', 'location', 'field'),)
value = models.TextField(default='null')
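# Editor's note: minimal usage sketch added by the editor, not part of the
# original file; 'ccx' stands for any saved CustomCourseForEdX instance.
#   ccx.has_started()        -> True once the overridden 'start' is in the past
#   ccx.has_ended()          -> False while no 'due' override is set
#   ccx.end_datetime_text()  -> ''   (empty string when 'due' is None)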
| agpl-3.0 |
Tesla-Redux-Devices/android_kernel_samsung_trlte | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
tschneidereit/servo | python/toml/toml.py | 29 | 15580 | import datetime, decimal, re
try:
_range = xrange
except NameError:
unicode = str
_range = range
basestring = str
unichr = chr
def load(f):
"""Returns a dictionary containing the named file parsed as toml."""
if isinstance(f, basestring):
with open(f) as ffile:
return loads(ffile.read())
elif isinstance(f, list):
for l in f:
if not isinstance(l, basestring):
raise Exception("Load expects a list to contain filenames only")
d = []
for l in f:
d.append(load(l))
r = {}
for l in d:
toml_merge_dict(r, l)
return r
elif f.read:
return loads(f.read())
else:
raise Exception("You can only load a file descriptor, filename or list")
def loads(s):
"""Returns a dictionary containing s, a string, parsed as toml."""
implicitgroups = []
retval = {}
currentlevel = retval
if isinstance(s, basestring):
try:
s.decode('utf8')
except AttributeError:
pass
sl = list(s)
openarr = 0
openstring = False
arrayoftables = False
beginline = True
keygroup = False
delnum = 1
for i in range(len(sl)):
if sl[i] == '"':
oddbackslash = False
try:
k = 1
j = sl[i-k]
oddbackslash = False
while j == '\\':
oddbackslash = not oddbackslash
k += 1
j = sl[i-k]
except IndexError:
pass
if not oddbackslash:
openstring = not openstring
if keygroup and (sl[i] == ' ' or sl[i] == '\t'):
keygroup = False
if arrayoftables and (sl[i] == ' ' or sl[i] == '\t'):
arrayoftables = False
if sl[i] == '#' and not openstring and not keygroup and not arrayoftables:
j = i
while sl[j] != '\n':
sl.insert(j, ' ')
sl.pop(j+1)
j += 1
if sl[i] == '[' and not openstring and not keygroup and not arrayoftables:
if beginline:
if sl[i+1] == '[':
arrayoftables = True
else:
keygroup = True
else:
openarr += 1
if sl[i] == ']' and not openstring:
if keygroup:
keygroup = False
elif arrayoftables:
if sl[i-1] == ']':
arrayoftables = False
else:
openarr -= 1
if sl[i] == '\n':
if openstring:
raise Exception("Unbalanced quotes")
if openarr:
sl.insert(i, ' ')
sl.pop(i+1)
else:
beginline = True
elif beginline and sl[i] != ' ' and sl[i] != '\t':
beginline = False
keygroup = True
s = ''.join(sl)
s = s.split('\n')
else:
raise Exception("What exactly are you trying to pull?")
for line in s:
line = line.strip()
if line == "":
continue
if line[0] == '[':
arrayoftables = False
if line[1] == '[':
arrayoftables = True
line = line[2:].split(']]', 1)
else:
line = line[1:].split(']', 1)
if line[1].strip() != "":
raise Exception("Key group not on a line by itself.")
line = line[0]
if '[' in line:
raise Exception("Key group name cannot contain '['")
if ']' in line:
raise Exception("Key group name cannot contain']'")
groups = line.split('.')
currentlevel = retval
for i in range(len(groups)):
group = groups[i]
if group == "":
raise Exception("Can't have a keygroup with an empty name")
try:
currentlevel[group]
if i == len(groups) - 1:
if group in implicitgroups:
implicitgroups.remove(group)
if arrayoftables:
raise Exception("An implicitly defined table can't be an array")
elif arrayoftables:
currentlevel[group].append({})
else:
raise Exception("What? "+group+" already exists?"+str(currentlevel))
except TypeError:
if i != len(groups) - 1:
implicitgroups.append(group)
currentlevel = currentlevel[0]
if arrayoftables:
currentlevel[group] = [{}]
else:
currentlevel[group] = {}
except KeyError:
if i != len(groups) - 1:
implicitgroups.append(group)
currentlevel[group] = {}
if i == len(groups) - 1 and arrayoftables:
currentlevel[group] = [{}]
currentlevel = currentlevel[group]
if arrayoftables:
try:
currentlevel = currentlevel[-1]
except KeyError:
pass
elif "=" in line:
i = 1
pair = line.split('=', i)
l = len(line)
while pair[-1][0] != ' ' and pair[-1][0] != '\t' and pair[-1][0] != '"' and pair[-1][0] != '[' and pair[-1] != 'true' and pair[-1] != 'false':
try:
float(pair[-1])
break
except ValueError:
try:
datetime.datetime.strptime(pair[-1], "%Y-%m-%dT%H:%M:%SZ")
break
except ValueError:
i += 1
pair = line.split('=', i)
newpair = []
newpair.append('='.join(pair[:-1]))
newpair.append(pair[-1])
pair = newpair
pair[0] = pair[0].strip()
pair[1] = pair[1].strip()
value, vtype = load_value(pair[1])
try:
currentlevel[pair[0]]
raise Exception("Duplicate keys!")
except KeyError:
currentlevel[pair[0]] = value
return retval
def load_value(v):
if v == 'true':
return (True, "bool")
elif v == 'false':
return (False, "bool")
elif v[0] == '"':
testv = v[1:].split('"')
closed = False
for tv in testv:
if tv == '':
closed = True
else:
oddbackslash = False
try:
i = -1
j = tv[i]
while j == '\\':
oddbackslash = not oddbackslash
i -= 1
j = tv[i]
except IndexError:
pass
if not oddbackslash:
if closed:
raise Exception("Stuff after closed string. WTF?")
else:
closed = True
escapes = ['0', 'b', 'f', '/', 'n', 'r', 't', '"', '\\']
escapedchars = ['\0', '\b', '\f', '/', '\n', '\r', '\t', '\"', '\\']
escapeseqs = v.split('\\')[1:]
backslash = False
for i in escapeseqs:
if i == '':
backslash = not backslash
else:
if i[0] not in escapes and i[0] != 'u' and not backslash:
raise Exception("Reserved escape sequence used")
if backslash:
backslash = False
if "\\u" in v:
hexchars = ['0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
hexbytes = v.split('\\u')
newv = hexbytes[0]
hexbytes = hexbytes[1:]
for hx in hexbytes:
hxb = ""
try:
if hx[0].lower() in hexchars:
hxb += hx[0].lower()
if hx[1].lower() in hexchars:
hxb += hx[1].lower()
if hx[2].lower() in hexchars:
hxb += hx[2].lower()
if hx[3].lower() in hexchars:
hxb += hx[3].lower()
except IndexError:
if len(hxb) != 2:
raise Exception("Invalid escape sequence")
if len(hxb) != 4 and len(hxb) != 2:
raise Exception("Invalid escape sequence")
newv += unichr(int(hxb, 16))
newv += unicode(hx[len(hxb):])
v = newv
for i in range(len(escapes)):
if escapes[i] == '\\':
v = v.replace("\\"+escapes[i], escapedchars[i])
else:
v = re.sub("([^\\\\](\\\\\\\\)*)\\\\"+escapes[i], "\\1"+escapedchars[i], v)
return (v[1:-1], "str")
elif v[0] == '[':
return (load_array(v), "array")
elif len(v) == 20 and v[-1] == 'Z':
if v[10] == 'T':
return (datetime.datetime.strptime(v, "%Y-%m-%dT%H:%M:%SZ"), "date")
else:
raise Exception("Wait, what?")
else:
itype = "int"
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
neg = False
if v[0] == '-':
neg = True
v = v[1:]
if '.' in v:
if v.split('.', 1)[1] == '':
raise Exception("This float is missing digits after the point")
if v[0] not in digits:
raise Exception("This float doesn't have a leading digit")
v = float(v)
itype = "float"
else:
v = int(v)
if neg:
return (0 - v, itype)
return (v, itype)
def load_array(a):
atype = None
retval = []
a = a.strip()
if '[' not in a[1:-1]:
strarray = False
tmpa = a[1:-1].strip()
if tmpa != '' and tmpa[0] == '"':
strarray = True
a = a[1:-1].split(',')
b = 0
if strarray:
while b < len(a) - 1:
while a[b].strip()[-1] != '"' and a[b+1].strip()[0] != '"':
a[b] = a[b] + ',' + a[b+1]
if b < len(a) - 2:
a = a[:b+1] + a[b+2:]
else:
a = a[:b+1]
b += 1
else:
al = list(a[1:-1])
a = []
openarr = 0
j = 0
for i in range(len(al)):
if al[i] == '[':
openarr += 1
elif al[i] == ']':
openarr -= 1
elif al[i] == ',' and not openarr:
a.append(''.join(al[j:i]))
j = i+1
a.append(''.join(al[j:]))
for i in range(len(a)):
a[i] = a[i].strip()
if a[i] != '':
nval, ntype = load_value(a[i])
if atype:
if ntype != atype:
raise Exception("Not a homogeneous array")
else:
atype = ntype
retval.append(nval)
return retval
def dump(o, f):
"""Writes out to f the toml corresponding to o. Returns said toml."""
if f.write:
d = dumps(o)
f.write(d)
return d
else:
raise Exception("You can only dump an object to a file descriptor")
def dumps(o):
"""Returns a string containing the toml corresponding to o, a dictionary"""
retval = ""
addtoretval, sections = dump_sections(o, "")
retval += addtoretval
while sections != {}:
newsections = {}
for section in sections:
addtoretval, addtosections = dump_sections(sections[section], section)
if addtoretval:
retval += "["+section+"]\n"
retval += addtoretval
for s in addtosections:
newsections[section+"."+s] = addtosections[s]
sections = newsections
return retval
def dump_sections(o, sup):
retstr = ""
if sup != "" and sup[-1] != ".":
sup += '.'
retdict = {}
arraystr = ""
for section in o:
if not isinstance(o[section], dict):
arrayoftables = False
if isinstance(o[section], list):
for a in o[section]:
if isinstance(a, dict):
arrayoftables = True
if arrayoftables:
for a in o[section]:
arraytabstr = ""
arraystr += "[["+sup+section+"]]\n"
s, d = dump_sections(a, sup+section)
if s:
if s[0] == "[":
arraytabstr += s
else:
arraystr += s
while d != {}:
newd = {}
for dsec in d:
s1, d1 = dump_sections(d[dsec], sup+section+dsec)
if s1:
arraytabstr += "["+sup+section+"."+dsec+"]\n"
arraytabstr += s1
for s1 in d1:
newd[dsec+"."+s1] = d1[s1]
d = newd
arraystr += arraytabstr
else:
retstr += section + " = " + str(dump_value(o[section])) + '\n'
else:
retdict[section] = o[section]
retstr += arraystr
return (retstr, retdict)
def dump_value(v):
if isinstance(v, list):
t = []
retval = "["
for u in v:
t.append(dump_value(u))
while t != []:
s = []
for u in t:
if isinstance(u, list):
for r in u:
s.append(r)
else:
retval += " " + str(u) + ","
t = s
retval += "]"
return retval
if isinstance(v, (str, unicode)):
escapes = ['\\', '0', 'b', 'f', '/', 'n', 'r', 't', '"']
escapedchars = ['\\', '\0', '\b', '\f', '/', '\n', '\r', '\t', '\"']
for i in range(len(escapes)):
v = v.replace(escapedchars[i], "\\"+escapes[i])
return str('"'+v+'"')
if isinstance(v, bool):
return str(v).lower()
if isinstance(v, datetime.datetime):
return v.isoformat()[:19]+'Z'
if isinstance(v, float):
return '{0:f}'.format(decimal.Decimal(str(v)))
return v
def toml_merge_dict(a, b):
for k in a:
if isinstance(a[k], dict):
try:
b[k]
except KeyError:
continue
if isinstance(b[k], dict):
b[k] = toml_merge_dict(a[k], b[k])
else:
raise Exception("Can't merge dict and nondict in toml object")
a.update(b)
return a
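# Editor's note: round-trip sketch appended by the editor, not part of the
# original module; the sample TOML text and key names are illustrative only.
if __name__ == '__main__':
    sample = 'title = "example"\n[owner]\nname = "tom"\n'
    parsed = loads(sample)   # {'title': 'example', 'owner': {'name': 'tom'}}
    assert parsed['owner']['name'] == 'tom'
    regenerated = dumps(parsed)   # TOML text equivalent to the parsed dict
    assert loads(regenerated) == parsed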
| mpl-2.0 |
SauloAislan/ironic | ironic/drivers/modules/oneview/deploy_utils.py | 1 | 16020 | # Copyright 2016 Hewlett Packard Enterprise Development LP.
# Copyright 2016 Universidade Federal de Campina Grande
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_log import log as logging
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.drivers.modules.oneview import common
LOG = logging.getLogger(__name__)
oneview_exception = importutils.try_import('oneview_client.exceptions')
oneview_utils = importutils.try_import('oneview_client.utils')
hpOneView_exception = importutils.try_import('hpOneView.exceptions')
def get_properties():
return common.COMMON_PROPERTIES
def prepare(hponeview_client, task):
"""Apply Server Profile and update the node when preparing.
This method is responsible for applying a Server Profile to the Server
    Hardware and adding the uri of the applied Server Profile to the node's
'applied_server_profile_uri' field on properties/capabilities.
:param hponeview_client: an instance of the OneView client
:param task: A TaskManager object
:raises InstanceDeployFailure: If the node doesn't have the needed OneView
        information, if the Server Hardware is in use by a OneView user, or
if the Server Profile can't be applied.
"""
if task.node.provision_state == states.DEPLOYING:
try:
instance_display_name = task.node.instance_info.get('display_name')
instance_uuid = task.node.instance_uuid
server_profile_name = (
"%(instance_name)s [%(instance_uuid)s]" %
{"instance_name": instance_display_name,
"instance_uuid": instance_uuid}
)
allocate_server_hardware_to_ironic(hponeview_client, task.node,
server_profile_name)
except exception.OneViewError as e:
raise exception.InstanceDeployFailure(node=task.node.uuid,
reason=e)
def tear_down(hponeview_client, task):
"""Remove Server profile and update the node when tear down.
    This method is responsible for powering the Server Hardware off, removing
    the Server Profile from the Server Hardware and removing the uri of the applied Server
Profile from the node's 'applied_server_profile_uri' in
properties/capabilities.
:param hponeview_client: an instance of the OneView client
:param task: A TaskManager object
:raises InstanceDeployFailure: If node has no uri of applied Server
Profile, or if some error occur while deleting Server Profile.
"""
try:
deallocate_server_hardware_from_ironic(hponeview_client, task)
except exception.OneViewError as e:
raise exception.InstanceDeployFailure(node=task.node.uuid, reason=e)
def prepare_cleaning(hponeview_client, task):
"""Apply Server Profile and update the node when preparing cleaning.
This method is responsible for applying a Server Profile to the Server
    Hardware and adding the uri of the applied Server Profile to the node's
'applied_server_profile_uri' field on properties/capabilities.
:param hponeview_client: an instance of the OneView client
:param task: A TaskManager object
:raises NodeCleaningFailure: If the node doesn't have the needed OneView
        information, if the Server Hardware is in use by a OneView user, or
if the Server Profile can't be applied.
"""
try:
server_profile_name = "Ironic Cleaning [%s]" % task.node.uuid
allocate_server_hardware_to_ironic(hponeview_client, task.node,
server_profile_name)
except exception.OneViewError as e:
oneview_error = common.SERVER_HARDWARE_ALLOCATION_ERROR
driver_internal_info = task.node.driver_internal_info
driver_internal_info['oneview_error'] = oneview_error
task.node.driver_internal_info = driver_internal_info
task.node.save()
raise exception.NodeCleaningFailure(node=task.node.uuid,
reason=e)
def tear_down_cleaning(hponeview_client, task):
"""Remove Server profile and update the node when tear down cleaning.
    This method is responsible for powering the Server Hardware off, removing
    the Server Profile from the Server Hardware and removing the uri of the applied Server
Profile from the node's 'applied_server_profile_uri' in
properties/capabilities.
:param hponeview_client: an instance of the OneView client
:param task: A TaskManager object
:raises NodeCleaningFailure: If node has no uri of applied Server Profile,
        or if an error occurs while deleting the Server Profile.
"""
try:
deallocate_server_hardware_from_ironic(hponeview_client, task)
except exception.OneViewError as e:
raise exception.NodeCleaningFailure(node=task.node.uuid, reason=e)
def _create_profile_from_template(
hponeview_client, server_profile_name,
server_hardware_uri, server_profile_template, deploy_plan_uri):
"""Create a server profile from a server profile template.
:param hponeview_client: an hpOneView Client instance
:param server_profile_name: the name of the new server profile
:param server_hardware_uri: the server_hardware assigned to server profile
    :param server_profile_template: the server profile template
    :param deploy_plan_uri: the uri of the OS deployment plan to set on the
        new server profile
:returns: The new server profile generated with the name and server
hardware passed on parameters
"""
deployment_plan = {
"osDeploymentPlanUri": deploy_plan_uri,
"osCustomAttributes": [],
"osVolumeUri": None
}
server_profile = hponeview_client.server_profile_templates.get_new_profile(
server_profile_template
)
server_profile['name'] = server_profile_name
server_profile['serverHardwareUri'] = server_hardware_uri
server_profile['serverProfileTemplateUri'] = ""
server_profile['osDeploymentSettings'] = deployment_plan
hponeview_client.server_profiles.create(server_profile)
return hponeview_client.server_profiles.get_by_name(server_profile_name)
def _is_node_in_use(server_hardware, applied_sp_uri, by_oneview=False):
"""Check if node is in use by ironic or by OneView.
    :param server_hardware: the Server Hardware object retrieved from OneView
    :param applied_sp_uri: the Server Profile uri applied by ironic, taken
        from the node's driver_info
    :param by_oneview: Boolean value. True to verify whether the node is in
        use by OneView, False to verify whether it is in use by ironic.
:returns: Boolean value. True if by_oneview param is also True and node is
in use by OneView, False otherwise. True if by_oneview param is
False and node is in use by ironic, False otherwise.
"""
operation = operator.ne if by_oneview else operator.eq
server_profile_uri = server_hardware.get('serverProfileUri')
return (server_profile_uri is not None and
operation(applied_sp_uri, server_profile_uri))
def is_node_in_use_by_oneview(client, node):
"""Check if node is in use by OneView user.
:param client: an instance of the OneView client
:param node: an ironic node object
:returns: Boolean value. True if node is in use by OneView,
False otherwise.
    :raises OneViewError: if not possible to get OneView's information
for the given node, if not possible to retrieve Server Hardware
from OneView.
"""
positive = _("Node '%s' is in use by OneView.") % node.uuid
negative = _("Node '%s' is not in use by OneView.") % node.uuid
def predicate(server_hardware, applied_sp_uri):
# Check if Profile exists in Oneview and it is different of the one
# applied by ironic
return _is_node_in_use(server_hardware, applied_sp_uri,
by_oneview=True)
return _check_applied_server_profile(client, node,
predicate, positive, negative)
def is_node_in_use_by_ironic(client, node):
"""Check if node is in use by ironic in OneView.
:param client: an instance of the OneView client
:param node: an ironic node object
:returns: Boolean value. True if node is in use by ironic,
False otherwise.
:raises OneViewError: if not possible to get OneView's information
for the given node, if not possible to retrieve Server Hardware
from OneView.
"""
positive = _("Node '%s' is in use by Ironic.") % node.uuid
negative = _("Node '%s' is not in use by Ironic.") % node.uuid
def predicate(server_hardware, applied_sp_uri):
# Check if Profile exists in Oneview and it is equals of the one
# applied by ironic
return _is_node_in_use(server_hardware, applied_sp_uri,
by_oneview=False)
return _check_applied_server_profile(client, node,
predicate, positive, negative)
def _check_applied_server_profile(client, node, predicate, positive, negative):
"""Check if node is in use by ironic in OneView.
:param hponeview_client: an instance of the OneView client
:param node: an ironic node object
:returns: Boolean value. True if node is in use by ironic,
False otherwise.
:raises OneViewError: if not possible to get OneView's information
for the given node, if not possible to retrieve Server Hardware
from OneView.
"""
oneview_info = common.get_oneview_info(node)
try:
server_hardware = client.server_hardware.get(
oneview_info.get('server_hardware_uri')
)
except hpOneView_exception.HPOneViewResourceNotFound as e:
msg = (_("Error while obtaining Server Hardware from node "
"%(node_uuid)s. Error: %(error)s") %
{'node_uuid': node.uuid, 'error': e})
raise exception.OneViewError(error=msg)
applied_sp_uri = node.driver_info.get('applied_server_profile_uri')
result = predicate(server_hardware, applied_sp_uri)
if result:
LOG.debug(positive)
else:
LOG.debug(negative)
return result
def _add_applied_server_profile_uri_field(node, applied_profile):
"""Add the applied Server Profile uri to a node.
:param node: an ironic node object
:param applied_profile: the server_profile that will be applied to node
"""
driver_info = node.driver_info
driver_info['applied_server_profile_uri'] = applied_profile.get('uri')
node.driver_info = driver_info
node.save()
def _del_applied_server_profile_uri_field(node):
"""Delete the applied Server Profile uri from a node if it exists.
:param node: an ironic node object
"""
driver_info = node.driver_info
driver_info.pop('applied_server_profile_uri', None)
node.driver_info = driver_info
node.save()
def allocate_server_hardware_to_ironic(hponeview_client, node,
server_profile_name):
"""Allocate Server Hardware to ironic.
:param hponeview_client: an instance of the OneView client
:param node: an ironic node object
:param server_profile_name: a formatted string with the Server Profile
name
:raises OneViewError: if an error occurs while allocating the Server
Hardware to ironic
"""
node_in_use_by_oneview = is_node_in_use_by_oneview(hponeview_client, node)
if not node_in_use_by_oneview:
oneview_info = common.get_oneview_info(node)
applied_sp_uri = node.driver_info.get('applied_server_profile_uri')
deploy_plan_uri = node.driver_info.get('deployment_plan_uri')
sh_uri = oneview_info.get("server_hardware_uri")
spt_uri = oneview_info.get("server_profile_template_uri")
server_hardware = hponeview_client.server_hardware.get(sh_uri)
# Don't have Server Profile on OneView but has
# `applied_server_profile_uri` on driver_info
if not server_hardware.get('serverProfileUri') and applied_sp_uri:
_del_applied_server_profile_uri_field(node)
LOG.info(
"Inconsistent 'applied_server_profile_uri' parameter "
"value in driver_info. There is no Server Profile "
"applied to node %(node_uuid)s. Value deleted.",
{"node_uuid": node.uuid}
)
# applied_server_profile_uri exists and is equal to Server profile
# applied on Hardware. Do not apply again.
if (
applied_sp_uri and server_hardware.get('serverProfileUri') and
server_hardware.get('serverProfileUri') == applied_sp_uri
):
LOG.info(
"The Server Profile %(applied_sp_uri)s was already applied "
"by ironic on node %(node_uuid)s. Reusing.",
{"node_uuid": node.uuid, "applied_sp_uri": applied_sp_uri}
)
return
try:
applied_profile = _create_profile_from_template(
hponeview_client, server_profile_name, sh_uri, spt_uri,
deploy_plan_uri
)
_add_applied_server_profile_uri_field(node, applied_profile)
LOG.info(
"Server Profile %(server_profile_uuid)s was successfully"
" applied to node %(node_uuid)s.",
{"node_uuid": node.uuid,
"server_profile_uuid": applied_profile.get('uri')}
)
except hpOneView_exception.HPOneViewInvalidResource as e:
LOG.error("An error occurred during allocating server "
"hardware to ironic during prepare: %s", e)
raise exception.OneViewError(error=e)
else:
msg = _("Node %s is already in use by OneView.") % node.uuid
raise exception.OneViewError(error=msg)
def deallocate_server_hardware_from_ironic(hponeview_client, task):
"""Deallocate Server Hardware from ironic.
:param hponeview_client: an instance of the OneView client
:param task: a TaskManager object
:raises OneViewError: if an error occurs while deallocating the Server
Hardware to ironic
"""
node = task.node
if is_node_in_use_by_ironic(hponeview_client, node):
oneview_info = common.get_oneview_info(node)
server_profile_uri = oneview_info.get('applied_server_profile_uri')
try:
task.driver.power.set_power_state(task, states.POWER_OFF)
hponeview_client.server_profiles.delete(server_profile_uri)
_del_applied_server_profile_uri_field(node)
LOG.info("Server Profile %(server_profile_uuid)s was deleted "
"from node %(node_uuid)s in OneView.",
{'server_profile_uri': server_profile_uri,
'node_uuid': node.uuid})
except (ValueError, hpOneView_exception.HPOneViewException) as e:
msg = (_("Error while deleting applied Server Profile from node "
"%(node_uuid)s. Error: %(error)s") %
{'node_uuid': node.uuid, 'error': e})
raise exception.OneViewError(error=msg)
else:
LOG.warning("Cannot deallocate node %(node_uuid)s "
"in OneView because it is not in use by "
"ironic.", {'node_uuid': node.uuid})
| apache-2.0 |
mscuthbert/abjad | abjad/tools/abjadbooktools/CodeBlockSpecifier.py | 1 | 2586 | # -*- encoding: utf-8 -*-
from abjad.tools import abctools
class CodeBlockSpecifier(abctools.AbjadValueObject):
r'''A code block specifier.
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Internals'
__slots__ = (
'_allow_exceptions',
'_hide',
'_strip_prompt',
'_text_width',
)
### INITIALIZER ###
def __init__(
self,
allow_exceptions=None,
hide=None,
strip_prompt=None,
text_width=None,
):
self._allow_exceptions = bool(allow_exceptions) or None
self._hide = bool(hide) or None
self._strip_prompt = bool(strip_prompt) or None
if text_width is not None:
if text_width is True:
text_width = 80
try:
text_width = int(text_width)
if text_width < 1:
text_width = None
except:
text_width = None
self._text_width = text_width
### PUBLIC METHODS ###
@classmethod
def from_options(cls, **options):
r'''Creates code block specifier from `options` dictionary.
Returns code block specifier.
'''
allow_exceptions = options.get('allow_exceptions', None) or None
hide = options.get('hide', None) or None
strip_prompt = options.get('strip_prompt', None) or None
text_width = options.get('text_width', None) or None
if all(_ is None for _ in (
allow_exceptions,
hide,
strip_prompt,
text_width,
)):
return None
return cls(
allow_exceptions=allow_exceptions,
hide=hide,
strip_prompt=strip_prompt,
text_width=text_width,
)
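    # Editor's note: illustrative sketch added by the editor, not part of the
    # original class; the option values below are assumptions.
    #   CodeBlockSpecifier.from_options(hide=True, text_width=60)
    #       -> CodeBlockSpecifier(hide=True, text_width=60)
    #   CodeBlockSpecifier.from_options()
    #       -> None   (no options set, so no specifier is created)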
### PUBLIC PROPERTIES ###
@property
def allow_exceptions(self):
r'''Is true if code block allows exceptions. Otherwise false.
Returns boolean.
'''
return self._allow_exceptions
@property
def hide(self):
r'''Is true if code block should be hidden. Otherwise false.
Returns boolean.
'''
return self._hide
@property
def strip_prompt(self):
r'''Is true if code block should strip Python prompt from output.
Otherwise false.
Returns boolean.
'''
return self._strip_prompt
@property
def text_width(self):
r'''Gets text width wrap of code block.
Returns integer or none.
'''
return self._text_width | gpl-3.0 |
gcodetogit/depot_tools | third_party/boto/manage/server.py | 63 | 21995 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 server
"""
from __future__ import with_statement
import boto.ec2
from boto.mashups.iobject import IObject
from boto.pyami.config import BotoConfigPath, Config
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty, CalculatedProperty
from boto.manage import propget
from boto.ec2.zone import Zone
from boto.ec2.keypair import KeyPair
import os, time, StringIO
from contextlib import closing
from boto.exception import EC2ResponseError
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
'c1.medium', 'c1.xlarge',
'm2.2xlarge', 'm2.4xlarge']
class Bundler(object):
def __init__(self, server, uname='root'):
from boto.manage.cmdshell import SSHClient
self.server = server
self.uname = uname
self.ssh_client = SSHClient(server, uname=uname)
def copy_x509(self, key_file, cert_file):
print '\tcopying cert and pk over to /mnt directory on server'
self.ssh_client.open_sftp()
path, name = os.path.split(key_file)
self.remote_key_file = '/mnt/%s' % name
self.ssh_client.put_file(key_file, self.remote_key_file)
path, name = os.path.split(cert_file)
self.remote_cert_file = '/mnt/%s' % name
self.ssh_client.put_file(cert_file, self.remote_cert_file)
print '...complete!'
def bundle_image(self, prefix, size, ssh_key):
command = ""
if self.uname != 'root':
command = "sudo "
command += 'ec2-bundle-vol '
command += '-c %s -k %s ' % (self.remote_cert_file, self.remote_key_file)
command += '-u %s ' % self.server._reservation.owner_id
command += '-p %s ' % prefix
command += '-s %d ' % size
command += '-d /mnt '
if self.server.instance_type == 'm1.small' or self.server.instance_type == 'c1.medium':
command += '-r i386'
else:
command += '-r x86_64'
return command
def upload_bundle(self, bucket, prefix, ssh_key):
command = ""
if self.uname != 'root':
command = "sudo "
command += 'ec2-upload-bundle '
command += '-m /mnt/%s.manifest.xml ' % prefix
command += '-b %s ' % bucket
command += '-a %s ' % self.server.ec2.aws_access_key_id
command += '-s %s ' % self.server.ec2.aws_secret_access_key
return command
def bundle(self, bucket=None, prefix=None, key_file=None, cert_file=None,
size=None, ssh_key=None, fp=None, clear_history=True):
iobject = IObject()
if not bucket:
bucket = iobject.get_string('Name of S3 bucket')
if not prefix:
prefix = iobject.get_string('Prefix for AMI file')
if not key_file:
key_file = iobject.get_filename('Path to RSA private key file')
if not cert_file:
cert_file = iobject.get_filename('Path to RSA public cert file')
if not size:
size = iobject.get_int('Size (in MB) of bundled image')
if not ssh_key:
ssh_key = self.server.get_ssh_key_file()
self.copy_x509(key_file, cert_file)
if not fp:
fp = StringIO.StringIO()
fp.write('sudo mv %s /mnt/boto.cfg; ' % BotoConfigPath)
fp.write('mv ~/.ssh/authorized_keys /mnt/authorized_keys; ')
if clear_history:
fp.write('history -c; ')
fp.write(self.bundle_image(prefix, size, ssh_key))
fp.write('; ')
fp.write(self.upload_bundle(bucket, prefix, ssh_key))
fp.write('; ')
fp.write('sudo mv /mnt/boto.cfg %s; ' % BotoConfigPath)
fp.write('mv /mnt/authorized_keys ~/.ssh/authorized_keys')
command = fp.getvalue()
print 'running the following command on the remote server:'
print command
t = self.ssh_client.run(command)
print '\t%s' % t[0]
print '\t%s' % t[1]
print '...complete!'
print 'registering image...'
self.image_id = self.server.ec2.register_image(name=prefix, image_location='%s/%s.manifest.xml' % (bucket, prefix))
return self.image_id
class CommandLineGetter(object):
def get_ami_list(self):
my_amis = []
for ami in self.ec2.get_all_images():
# hack alert, need a better way to do this!
if ami.location.find('pyami') >= 0:
my_amis.append((ami.location, ami))
return my_amis
def get_region(self, params):
region = params.get('region', None)
if isinstance(region, str) or isinstance(region, unicode):
region = boto.ec2.get_region(region)
params['region'] = region
if not region:
prop = self.cls.find_property('region_name')
params['region'] = propget.get(prop, choices=boto.ec2.regions)
self.ec2 = params['region'].connect()
def get_name(self, params):
if not params.get('name', None):
prop = self.cls.find_property('name')
params['name'] = propget.get(prop)
def get_description(self, params):
if not params.get('description', None):
prop = self.cls.find_property('description')
params['description'] = propget.get(prop)
def get_instance_type(self, params):
if not params.get('instance_type', None):
prop = StringProperty(name='instance_type', verbose_name='Instance Type',
choices=InstanceTypes)
params['instance_type'] = propget.get(prop)
def get_quantity(self, params):
if not params.get('quantity', None):
prop = IntegerProperty(name='quantity', verbose_name='Number of Instances')
params['quantity'] = propget.get(prop)
def get_zone(self, params):
if not params.get('zone', None):
prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
choices=self.ec2.get_all_zones)
params['zone'] = propget.get(prop)
def get_ami_id(self, params):
valid = False
while not valid:
ami = params.get('ami', None)
if not ami:
prop = StringProperty(name='ami', verbose_name='AMI')
ami = propget.get(prop)
try:
rs = self.ec2.get_all_images([ami])
if len(rs) == 1:
valid = True
params['ami'] = rs[0]
except EC2ResponseError:
pass
def get_group(self, params):
group = params.get('group', None)
if isinstance(group, str) or isinstance(group, unicode):
group_list = self.ec2.get_all_security_groups()
for g in group_list:
if g.name == group:
group = g
params['group'] = g
if not group:
prop = StringProperty(name='group', verbose_name='EC2 Security Group',
choices=self.ec2.get_all_security_groups)
params['group'] = propget.get(prop)
def get_key(self, params):
keypair = params.get('keypair', None)
if isinstance(keypair, str) or isinstance(keypair, unicode):
key_list = self.ec2.get_all_key_pairs()
for k in key_list:
if k.name == keypair:
keypair = k.name
params['keypair'] = k.name
if not keypair:
prop = StringProperty(name='keypair', verbose_name='EC2 KeyPair',
choices=self.ec2.get_all_key_pairs)
params['keypair'] = propget.get(prop).name
def get(self, cls, params):
self.cls = cls
self.get_region(params)
self.ec2 = params['region'].connect()
self.get_name(params)
self.get_description(params)
self.get_instance_type(params)
self.get_zone(params)
self.get_quantity(params)
self.get_ami_id(params)
self.get_group(params)
self.get_key(params)
class Server(Model):
#
# The properties of this object consists of real properties for data that
# is not already stored in EC2 somewhere (e.g. name, description) plus
# calculated properties for all of the properties that are already in
# EC2 (e.g. hostname, security groups, etc.)
#
name = StringProperty(unique=True, verbose_name="Name")
description = StringProperty(verbose_name="Description")
region_name = StringProperty(verbose_name="EC2 Region Name")
instance_id = StringProperty(verbose_name="EC2 Instance ID")
elastic_ip = StringProperty(verbose_name="EC2 Elastic IP Address")
production = BooleanProperty(verbose_name="Is This Server Production", default=False)
ami_id = CalculatedProperty(verbose_name="AMI ID", calculated_type=str, use_method=True)
zone = CalculatedProperty(verbose_name="Availability Zone Name", calculated_type=str, use_method=True)
hostname = CalculatedProperty(verbose_name="Public DNS Name", calculated_type=str, use_method=True)
private_hostname = CalculatedProperty(verbose_name="Private DNS Name", calculated_type=str, use_method=True)
groups = CalculatedProperty(verbose_name="Security Groups", calculated_type=list, use_method=True)
security_group = CalculatedProperty(verbose_name="Primary Security Group Name", calculated_type=str, use_method=True)
key_name = CalculatedProperty(verbose_name="Key Name", calculated_type=str, use_method=True)
instance_type = CalculatedProperty(verbose_name="Instance Type", calculated_type=str, use_method=True)
status = CalculatedProperty(verbose_name="Current Status", calculated_type=str, use_method=True)
launch_time = CalculatedProperty(verbose_name="Server Launch Time", calculated_type=str, use_method=True)
console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=file, use_method=True)
packages = []
plugins = []
@classmethod
def add_credentials(cls, cfg, aws_access_key_id, aws_secret_access_key):
if not cfg.has_section('Credentials'):
cfg.add_section('Credentials')
cfg.set('Credentials', 'aws_access_key_id', aws_access_key_id)
cfg.set('Credentials', 'aws_secret_access_key', aws_secret_access_key)
if not cfg.has_section('DB_Server'):
cfg.add_section('DB_Server')
cfg.set('DB_Server', 'db_type', 'SimpleDB')
cfg.set('DB_Server', 'db_name', cls._manager.domain.name)
@classmethod
def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
"""
Create a new instance based on the specified configuration file or the specified
configuration and the passed in parameters.
If the config_file argument is not None, the configuration is read from there.
Otherwise, the cfg argument is used.
The config file may include other config files with a #import reference. The included
config files must reside in the same directory as the specified file.
The logical_volume argument, if supplied, will be used to get the current physical
volume ID and use that as an override of the value specified in the config file. This
may be useful for debugging purposes when you want to debug with a production config
file but a test Volume.
The dictionary argument may be used to override any EC2 configuration values in the
config file.
"""
if config_file:
cfg = Config(path=config_file)
if cfg.has_section('EC2'):
# include any EC2 configuration values that aren't specified in params:
for option in cfg.options('EC2'):
if option not in params:
params[option] = cfg.get('EC2', option)
getter = CommandLineGetter()
getter.get(cls, params)
region = params.get('region')
ec2 = region.connect()
cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
ami = params.get('ami')
kp = params.get('keypair')
group = params.get('group')
zone = params.get('zone')
# deal with possibly passed in logical volume:
if logical_volume != None:
cfg.set('EBS', 'logical_volume_name', logical_volume.name)
cfg_fp = StringIO.StringIO()
cfg.write(cfg_fp)
# deal with the possibility that zone and/or keypair are strings read from the config file:
if isinstance(zone, Zone):
zone = zone.name
if isinstance(kp, KeyPair):
kp = kp.name
reservation = ami.run(min_count=1,
max_count=params.get('quantity', 1),
key_name=kp,
security_groups=[group],
instance_type=params.get('instance_type'),
placement = zone,
user_data = cfg_fp.getvalue())
l = []
i = 0
elastic_ip = params.get('elastic_ip')
instances = reservation.instances
if elastic_ip != None and instances.__len__() > 0:
instance = instances[0]
print 'Waiting for instance to start so we can set its elastic IP address...'
# Sometimes we get a message from ec2 that says that the instance does not exist.
            # Hopefully the following delay will give ec2 enough time to get to a stable state:
time.sleep(5)
while instance.update() != 'running':
time.sleep(1)
instance.use_ip(elastic_ip)
print 'set the elastic IP of the first instance to %s' % elastic_ip
for instance in instances:
s = cls()
s.ec2 = ec2
            # parenthesized so only the numeric suffix, not the whole name, is conditional
            s.name = params.get('name') + ('' if i == 0 else str(i))
s.description = params.get('description')
s.region_name = region.name
s.instance_id = instance.id
if elastic_ip and i == 0:
s.elastic_ip = elastic_ip
s.put()
l.append(s)
i += 1
return l
@classmethod
def create_from_instance_id(cls, instance_id, name, description=''):
regions = boto.ec2.regions()
for region in regions:
ec2 = region.connect()
try:
rs = ec2.get_all_instances([instance_id])
except:
rs = []
if len(rs) == 1:
s = cls()
s.ec2 = ec2
s.name = name
s.description = description
s.region_name = region.name
s.instance_id = instance_id
s._reservation = rs[0]
for instance in s._reservation.instances:
if instance.id == instance_id:
s._instance = instance
s.put()
return s
return None
@classmethod
def create_from_current_instances(cls):
servers = []
regions = boto.ec2.regions()
for region in regions:
ec2 = region.connect()
rs = ec2.get_all_instances()
for reservation in rs:
for instance in reservation.instances:
try:
Server.find(instance_id=instance.id).next()
boto.log.info('Server for %s already exists' % instance.id)
except StopIteration:
s = cls()
s.ec2 = ec2
s.name = instance.id
s.region_name = region.name
s.instance_id = instance.id
s._reservation = reservation
s.put()
servers.append(s)
return servers
def __init__(self, id=None, **kw):
Model.__init__(self, id, **kw)
self.ssh_key_file = None
self.ec2 = None
self._cmdshell = None
self._reservation = None
self._instance = None
self._setup_ec2()
def _setup_ec2(self):
if self.ec2 and self._instance and self._reservation:
return
if self.id:
if self.region_name:
for region in boto.ec2.regions():
if region.name == self.region_name:
self.ec2 = region.connect()
if self.instance_id and not self._instance:
try:
rs = self.ec2.get_all_instances([self.instance_id])
if len(rs) >= 1:
for instance in rs[0].instances:
if instance.id == self.instance_id:
self._reservation = rs[0]
self._instance = instance
except EC2ResponseError:
pass
def _status(self):
status = ''
if self._instance:
self._instance.update()
status = self._instance.state
return status
def _hostname(self):
hostname = ''
if self._instance:
hostname = self._instance.public_dns_name
return hostname
def _private_hostname(self):
hostname = ''
if self._instance:
hostname = self._instance.private_dns_name
return hostname
def _instance_type(self):
it = ''
if self._instance:
it = self._instance.instance_type
return it
def _launch_time(self):
lt = ''
if self._instance:
lt = self._instance.launch_time
return lt
def _console_output(self):
co = ''
if self._instance:
co = self._instance.get_console_output()
return co
def _groups(self):
gn = []
if self._reservation:
gn = self._reservation.groups
return gn
def _security_group(self):
groups = self._groups()
if len(groups) >= 1:
return groups[0].id
return ""
def _zone(self):
zone = None
if self._instance:
zone = self._instance.placement
return zone
def _key_name(self):
kn = None
if self._instance:
kn = self._instance.key_name
return kn
def put(self):
Model.put(self)
self._setup_ec2()
def delete(self):
if self.production:
raise ValueError("Can't delete a production server")
#self.stop()
Model.delete(self)
def stop(self):
if self.production:
raise ValueError("Can't delete a production server")
if self._instance:
self._instance.stop()
def terminate(self):
if self.production:
raise ValueError("Can't delete a production server")
if self._instance:
self._instance.terminate()
def reboot(self):
if self._instance:
self._instance.reboot()
def wait(self):
while self.status != 'running':
time.sleep(5)
def get_ssh_key_file(self):
if not self.ssh_key_file:
ssh_dir = os.path.expanduser('~/.ssh')
if os.path.isdir(ssh_dir):
ssh_file = os.path.join(ssh_dir, '%s.pem' % self.key_name)
if os.path.isfile(ssh_file):
self.ssh_key_file = ssh_file
if not self.ssh_key_file:
iobject = IObject()
self.ssh_key_file = iobject.get_filename('Path to OpenSSH Key file')
return self.ssh_key_file
def get_cmdshell(self):
if not self._cmdshell:
import cmdshell
self.get_ssh_key_file()
self._cmdshell = cmdshell.start(self)
return self._cmdshell
def reset_cmdshell(self):
self._cmdshell = None
def run(self, command):
with closing(self.get_cmdshell()) as cmd:
status = cmd.run(command)
return status
def get_bundler(self, uname='root'):
self.get_ssh_key_file()
return Bundler(self, uname)
def get_ssh_client(self, uname='root', ssh_pwd=None):
from boto.manage.cmdshell import SSHClient
self.get_ssh_key_file()
return SSHClient(self, uname=uname, ssh_pwd=ssh_pwd)
def install(self, pkg):
return self.run('apt-get -y install %s' % pkg)
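# Editor's note: usage sketch appended by the editor, not part of the original
# module; the instance id, names and command are made-up illustrative values.
#   server = Server.create_from_instance_id('i-12345678', 'web-1', 'demo box')
#   server.wait()                    # poll until status becomes 'running'
#   status = server.run('uptime')    # execute a command over the SSH cmdshell
#   bundler = server.get_bundler()   # Bundler bound to this server's SSH key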
| bsd-3-clause |
faarwa/EngSocP5 | zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/Scanner/Fortran.py | 34 | 14360 | """SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Fortran.py 5023 2010/06/14 22:05:46 scons"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
"""
A Classic Scanner subclass for Fortran source files which takes
into account both USE and INCLUDE statements. This scanner will
work for both F77 and F90 (and beyond) compilers.
Currently, this scanner assumes that the include files do not contain
USE statements. To enable the ability to deal with USE statements
in include files, add logic right after the module names are found
to loop over each include file, search for and locate each USE
statement, and append each module name to the list of dependencies.
Caching the search results in a common dictionary somewhere so that
the same include file is not searched multiple times would be a
smart thing to do.
"""
def __init__(self, name, suffixes, path_variable,
use_regex, incl_regex, def_regex, *args, **kw):
self.cre_use = re.compile(use_regex, re.M)
self.cre_incl = re.compile(incl_regex, re.M)
self.cre_def = re.compile(def_regex, re.M)
def _scan(node, env, path, self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, env, path)
kw['function'] = _scan
kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
SCons.Scanner.Current.__init__(self, *args, **kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
if node.includes != None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
d = {}
for m in defmodules:
d[m] = 1
modules = [m for m in modules if m not in d]
#modules = self.undefinedModules(modules, defmodules)
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = [x.lower() + suffix for x in modules]
            # Remove duplicate items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
        # of the file as specified on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
    # find them, and kill the build? Or will there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
# ^|; : matches either the start of the line or a semicolon - semicolon
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
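    # Illustrative examples (added for clarity, not part of the original
    # SCons comments) of lines the pattern matches and the module name
    # captured by its single group:
    #
    #   USE mod_constants                  -> "mod_constants"
    #   use, intrinsic :: iso_c_binding    -> "iso_c_binding"
    #   call setup(); USE mod_util         -> "mod_util"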
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text"
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
    # (?:\w+_)? : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
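    # Illustrative examples (added for clarity, not part of the original
    # SCons comments) of lines the pattern matches and what the group captures:
    #
    #   INCLUDE 'params.inc'                     -> "params.inc"
    #   INCLUDE "defs.h" ; INCLUDE 'more.inc'    -> "defs.h" and "more.inc"
    #   INCLUDE kind_'constants.f90'             -> "constants.f90"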
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
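    # Illustrative examples (added for clarity, not part of the original
    # SCons comments):
    #
    #   MODULE geometry            -> defines module "geometry"
    #   MODULE PROCEDURE interp    -> not matched (a module procedure, not a module)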
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
jdemon519/cfme_tests | cfme/tests/optimize/test_bottlenecks.py | 2 | 4652 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from datetime import timedelta
from cfme.optimize.bottlenecks import Bottlenecks
from utils import conf
from utils.appliance.implementations.ui import navigate_to
from utils.ssh import SSHClient
from utils.version import current_version
@pytest.fixture(scope="module")
def temp_appliance_extended_db(temp_appliance_preconfig):
app = temp_appliance_preconfig
app.stop_evm_service()
app.extend_db_partition()
app.start_evm_service()
return app
@pytest.fixture(scope="module")
def db_tbl(temp_appliance_extended_db):
app = temp_appliance_extended_db
return app.db['bottleneck_events']
@pytest.fixture(scope="module")
def db_events(temp_appliance_extended_db, db_tbl):
app = temp_appliance_extended_db
return app.db.session.query(db_tbl.timestamp,
db_tbl.resource_type, db_tbl.resource_name, db_tbl.event_type, db_tbl.severity, db_tbl.message)
@pytest.fixture(scope="module")
def db_restore(temp_appliance_extended_db):
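    # Restore a pre-built bottlenecks database dump onto the temporary
    # appliance: the dump plus the matching v2_key, REGION and GUID files are
    # fetched over SSH from the storage host configured in cfme_data and
    # pushed to the appliance before the database is restored and the UI
    # brought back up.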
app = temp_appliance_extended_db
app.stop_evm_service()
app.drop_database()
db_storage_hostname = conf.cfme_data['bottlenecks']['hostname']
db_storage = SSHClient(hostname=db_storage_hostname, **conf.credentials['bottlenecks'])
with db_storage as ssh:
# Different files for different versions
ver = "_56" if current_version() < '5.7' else ""
rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
ssh.get_file("/home/backups/otsuman_db_bottlenecks/v2_key{}".format(ver), rand_filename)
dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
ssh.get_file("/home/backups/otsuman_db_bottlenecks/db.backup{}".format(ver), dump_filename)
region_filename = "/tmp/REGION_{}".format(fauxfactory.gen_alphanumeric())
ssh.get_file("/home/backups/otsuman_db_bottlenecks/REGION{}".format(ver), region_filename)
guid_filename = "/tmp/GUID_{}".format(fauxfactory.gen_alphanumeric())
ssh.get_file("/home/backups/otsuman_db_bottlenecks/GUID{}".format(ver), guid_filename)
with app.ssh_client as ssh:
ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
ssh.put_file(dump_filename, "/tmp/evm_db.backup")
ssh.put_file(region_filename, "/var/www/miq/vmdb/REGION")
ssh.put_file(guid_filename, "/var/www/miq/vmdb/GUID")
app.restore_database()
app.start_evm_service()
app.wait_for_web_ui()
@pytest.mark.tier(1)
def test_bottlenecks_event_groups(temp_appliance_extended_db, db_restore, db_tbl, db_events):
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
# Enabling this option to show all possible values
view.report.show_host_events.fill(True)
view.report.event_groups.fill('Capacity')
rows = view.report.event_details.rows()
# Compare number of rows in bottleneck's table with number of rows in db
assert sum(1 for row in rows) == db_events.filter(db_tbl.event_type == 'DiskUsage').count()
view.report.event_groups.fill('Utilization')
rows = view.report.event_details.rows()
assert sum(1 for row in rows) == db_events.filter(db_tbl.event_type != 'DiskUsage').count()
@pytest.mark.tier(1)
def test_bottlenecks_show_host_events(temp_appliance_extended_db, db_restore, db_events):
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
view.report.show_host_events.fill(False)
rows = view.report.event_details.rows(type='Host / Node')
# Checking that rows with value 'Host / Node' absent in table
assert not sum(1 for row in rows)
view.report.show_host_events.fill(True)
rows = view.report.event_details.rows()
# Compare number of rows in bottleneck's table with number of rows in db
assert sum(1 for row in rows) == db_events.count()
@pytest.mark.tier(1)
def test_bottlenecks_time_zome(temp_appliance_extended_db, db_restore, db_tbl, db_events):
with temp_appliance_extended_db:
view = navigate_to(Bottlenecks, 'All')
row = view.report.event_details[0]
        # Selecting row by unique value
db_row = db_events.filter(db_tbl.message == row[5].text)
# Compare bottleneck's table timestamp with db
assert row[0].text == db_row[0][0].strftime("%m/%d/%y %H:%M:%S UTC")
# Changing time zone
view.report.time_zone.fill('(GMT-04:00) La Paz')
row = view.report.event_details[0]
assert row[0].text == (db_row[0][0] - timedelta(hours=4)).strftime("%m/%d/%y %H:%M:%S BOT")
| gpl-2.0 |
Strubbl/dinorss | site_check.py | 1 | 2974 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Wannabe RSS
"""
import chardet
import codecs
import difflib
import os
import time
import urllib.error
import urllib.request
from htmldiff.diff import textDiff
import config
from functions import is_content_different
from functions import save_file
from functions import to_unicode_or_bust
__author__ = config.author
__email__ = config.email
__version__ = config.version
__credits__ = config.credits
cache_dir = config.sites_dir
sites = config.sites
### main program
if __name__ == "__main__":
print("Wannabe RSS")
for site in sites:
# iterate through all sites to check
site_path = cache_dir + os.sep + site[0]
if not os.path.exists(site_path):
os.makedirs(site_path)
print(site_path)
# get recent content of the site
frecent = urllib.request.urlopen(site[1])
try:
rawdata = frecent.read()
encoding = chardet.detect(rawdata)
recent_content = rawdata.decode(encoding['encoding'])
except urllib.error.URLError as e:
            print('URL error: ' + str(e))
finally:
frecent.close()
# get all files from the site dir
for path, dirs, site_files in os.walk(site_path):
site_files.sort()
site_files.reverse() # to have the most recent on top
print('site files: ' + str(site_files))
print('no of files: ' + str(len(site_files)))
if len(site_files) > 0:
# we have already checked this site, so open the last content
last_file = site_files[0]
flast = codecs.open(site_path + os.sep + last_file, 'r', encoding='utf-8')
last_content = flast.read()
flast.close()
else:
# never checked that site before, so set last content same as recent content so that there is no diff, creepy workaround
last_content = recent_content
# and save the recent content to a file cause it is completely new
save_file(site_path + os.sep + str(int(time.time())), recent_content)
# print 'recent: ' + recent_content
# print 'last: ' + last_content
is_diff = is_content_different(last_content, recent_content, site[2], site[3])
print('is different? ' + str(is_diff))
if is_diff:
print('saving recent content')
save_file(site_path + os.sep + str(int(time.time())), recent_content)
else:
print('yet no diff, no need to save')
# cleanup: delete files more than max site caches config var
if len(site_files) > config.max_site_caches:
no_files_to_delete = len(site_files) - config.max_site_caches
print('no of files to delete: ' + str(no_files_to_delete))
                for i in range(0, no_files_to_delete): # no -1 here, because the file we just created is not in site_files yet
print('deleting item no. ' + str(i) + ': ' + site_files[i])
os.remove(os.path.join(site_path, site_files[i]))
else:
print('site versions ' + str(len(site_files)) + ' is less equal maximum of ' + str(config.max_site_caches))
print('')
| gpl-3.0 |
LeoHeo/fastube | fastube/fastube/settings/partials/auth.py | 1 | 1885 | # -*- coding: utf-8 -*-
import os
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Auth Model
AUTH_USER_MODEL = "users.User"
# Login
LOGIN_URL = "/login/"
SIGNUP_SUCCESS_MESSAGE = "성공적으로 회원가입이 되었습니다."
LOGIN_SUCCESS_MESSAGE = "성공적으로 로그인이 되었습니다."
LOGOUT_SUCCESS_MESSAGE = "성공적으로 로그아웃이 되었습니다."
SOCIAL_AUTH_URL_NAMESPACE = 'social'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.kakao.KakaoOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get("SOCIAL_AUTH_FACEBOOK_KEY")
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get("SOCIAL_AUTH_FACEBOOK_SECRET")
SOCIAL_AUTH_KAKAO_KEY = os.environ.get("SOCIAL_AUTH_KAKAO_KEY")
SOCIAL_AUTH_KAKAO_SECRET = os.environ.get("SOCIAL_AUTH_KAKAO_SECRET")
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"
| mit |
adrpar/incubator-airflow | airflow/operators/jdbc_operator.py | 46 | 2402 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'janomar'
import logging
from airflow.hooks.jdbc_hook import JdbcHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class JdbcOperator(BaseOperator):
"""
Executes sql code in a database using jdbc driver.
Requires jaydebeapi.
:param jdbc_url: driver specific connection url with string variables, e.g. for exasol jdbc:exa:{0}:{1};schema={2}
Template vars are defined like this: {0} = hostname, {1} = port, {2} = dbschema, {3} = extra
:type jdbc_url: string
:param jdbc_driver_name: classname of the specific jdbc driver, for exasol com.exasol.jdbc.EXADriver
:type jdbc_driver_name: string
:param jdbc_driver_loc: absolute path to jdbc driver location, for example /var/exasol/exajdbc.jar
:type jdbc_driver_loc: string
:param conn_id: reference to a predefined database
:type conn_id: string
:param sql: the sql code to be executed
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
        Template references are recognized by str ending in '.sql'
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#ededed'
@apply_defaults
def __init__(
self, sql,
jdbc_conn_id='jdbc_default', autocommit=False, parameters=None,
*args, **kwargs):
super(JdbcOperator, self).__init__(*args, **kwargs)
self.parameters = parameters
self.sql = sql
self.jdbc_conn_id = jdbc_conn_id
self.autocommit = autocommit
def execute(self, context):
logging.info('Executing: ' + str(self.sql))
self.hook = JdbcHook(jdbc_conn_id=self.jdbc_conn_id)
self.hook.run(self.sql, self.autocommit, parameters=self.parameters)
| apache-2.0 |
fengsp/tornado | docs/conf.py | 8 | 2959 | # Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
import time
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
version = release = tornado.version
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = [
"tornado.platform.asyncio",
"tornado.platform.caresresolver",
"tornado.platform.twisted",
]
# I wish this could go in a per-module file...
coverage_ignore_classes = [
# tornado.concurrent
"TracebackFuture",
# tornado.gen
"Runner",
# tornado.ioloop
"PollIOLoop",
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
"WebSocketProtocol76",
]
coverage_ignore_functions = [
# various modules
"doctests",
"main",
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
]
html_favicon = 'favicon.ico'
latex_documents = [
('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {
'current_tarball': (
'https://pypi.python.org/packages/source/t/tornado/tornado-%s.tar.g%%s' % version,
'tornado-%s.tar.g' % version),
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| apache-2.0 |
gtko/CouchPotatoServer | libs/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
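        # Feed the buffer to every still-active prober.  A prober that
        # positively identifies its charset (eFoundIt) becomes the best guess
        # and we return immediately; a prober that rules itself out (eNotMe)
        # is deactivated, and once all probers are deactivated the whole
        # group reports eNotMe.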
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
| gpl-3.0 |
wltrimbl/kmerspectrumanalyzer | ksatools/filter-annotated.py | 1 | 1960 | #!/usr/bin/env python
import sys
import os
import re
from argparse import ArgumentParser
from Bio import SeqIO
def gccontent(sq):
'''function returns gc content of sequence'''
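    # For example (illustrative): gccontent("GGCCAT") returns 4/6 = 0.666...,
    # and gccontent("") returns 0 because the division is guarded below.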
l = len(sq)
gc_count = 0
for ch in sq:
if ch in ['G', 'C', 'g', 'c']:
gc_count += 1
try:
r = float(float(gc_count) / l)
except:
r = 0
return r
if __name__ == '__main__':
usage = "usage: %prog -i <file1> -o <rejectfile> \n filters interleaved fastq on regex in header, outputs to std out"
parser = ArgumentParser(description=usage)
parser.add_argument("-i", "--in", dest="inp",
default=None, help="input file")
parser.add_argument("-o", "--out", dest="outp",
default=None, help="reject fastq file")
parser.add_argument("-v", "--verbose", dest="verbose",
action="store_true", default=True, help="Verbose [default off]")
args = parser.parse_args()
if not (args.inp and os.path.isfile(args.inp)):
parser.error("Missing input file")
in_fh = open(args.inp)
if(args.outp):
out_fh = open(args.outp, "w")
sys.stderr.write("Processing %s ... " % (args.inp,))
pattern = re.compile(r"med21mer=(\d*)")
records = SeqIO.parse(in_fh, "fastq")
for seq_record1 in records:
        seq_record2 = next(records)
m1 = int(pattern.search(seq_record1.description).group(1))
m2 = int(pattern.search(seq_record2.description).group(1))
g1 = gccontent(str(seq_record1.seq))
g2 = gccontent(str(seq_record2.seq))
# print("%d\t%d\t%.2f\t%.2f"%(m1, m2, g1, g2))
if 1:
if m1 > 75 and m2 > 75:
SeqIO.write([seq_record1, seq_record2], sys.stdout, "fastq")
else:
if args.outp:
SeqIO.write([seq_record1, seq_record2], out_fh, "fastq")
if args.verbose:
sys.stderr.write("Done. \n")
| bsd-2-clause |
roxypop/prueba | ejemplo.py | 1 | 1108 | _author_ = 'Rosy'
def sumados():
print 10 * 20
def divicion(a, b):
result = a / b
print result
def cast():
    lista=[1,2,3, "hola", {"key1":"Rosa","key2":"Armando"}, (1,2,3)]
tupla=(1,2,3)
diccionario={"key1":"Rosa","key2":"Armando"}
for i in lista:
print i
"""for k,v in diccionario.items():
print "%s %s" %(k,v)"""
def main():
sumados()
divicion(10, 20)
class Estudiante(object):
def __init__(self, nombre, edad):
self.nombre = nombre
self.edad = edad
def hola(self):
return self.nombre
def esMayor(self):
if self.edad >= 18:
return True
else:
return False
def exception():
try:
3 / 0
except Exception:
print "Error"
def main():
e = Estudiante("Rosa", 22)
print "hola %s"%e.hola()
if e.esMayor():
print "Es mayor de edad"
else:
print "Es menor de edad"
contador = 0
while contador <= 0:
print contador
contador +=1
exception()
print cast()
if __name__ == "__main__":
main()
| gpl-2.0 |
Chilledheart/chromium | tools/telemetry/telemetry/internal/actions/drag.py | 29 | 4397 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Telemetry page_action that performs the "drag" action on pages.
Action parameters are:
- selector: If no selector is defined then the action attempts to drag the
document element on the page.
- element_function: JavaScript expression that evaluates to the element to drag.
- text: The element with exact text is selected.
- left_start_ratio: ratio of start point's left coordinate to the element
width.
- top_start_ratio: ratio of start point's top coordinate to the element height.
- left_end_ratio: ratio of end point's left coordinate to the element width.
- top_end_ratio: ratio of end point's top coordinate to the element height.
- speed_in_pixels_per_second: speed of the drag gesture in pixels per second.
- use_touch: boolean value to specify if gesture should use touch input or not.
"""
import os
from telemetry.internal.actions import page_action
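# Illustrative example (not part of the original file) of constructing this
# action to drag a hypothetical '#map' element from its centre toward the
# top-left corner using touch input:
#
#   DragAction(selector='#map',
#              left_start_ratio=0.5, top_start_ratio=0.5,
#              left_end_ratio=0.2, top_end_ratio=0.2,
#              speed_in_pixels_per_second=500, use_touch=True)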
class DragAction(page_action.PageAction):
def __init__(self, selector=None, text=None, element_function=None,
left_start_ratio=None, top_start_ratio=None, left_end_ratio=None,
top_end_ratio=None, speed_in_pixels_per_second=800,
use_touch=False,
synthetic_gesture_source=page_action.GESTURE_SOURCE_DEFAULT):
super(DragAction, self).__init__()
self._selector = selector
self._text = text
self._element_function = element_function
self._left_start_ratio = left_start_ratio
self._top_start_ratio = top_start_ratio
self._left_end_ratio = left_end_ratio
self._top_end_ratio = top_end_ratio
self._speed = speed_in_pixels_per_second
self._use_touch = use_touch
self._synthetic_gesture_source = ('chrome.gpuBenchmarking.%s_INPUT' %
synthetic_gesture_source)
def WillRunAction(self, tab):
for js_file in ['gesture_common.js', 'drag.js']:
with open(os.path.join(os.path.dirname(__file__), js_file)) as f:
js = f.read()
tab.ExecuteJavaScript(js)
# Fail if browser doesn't support synthetic drag gestures.
if not tab.EvaluateJavaScript('window.__DragAction_SupportedByBrowser()'):
raise page_action.PageActionNotSupported(
'Synthetic drag not supported for this browser')
# Fail if this action requires touch and we can't send touch events.
if self._use_touch:
if not page_action.IsGestureSourceTypeSupported(tab, 'touch'):
raise page_action.PageActionNotSupported(
'Touch drag not supported for this browser')
if (self._synthetic_gesture_source ==
'chrome.gpuBenchmarking.MOUSE_INPUT'):
raise page_action.PageActionNotSupported(
'Drag requires touch on this page but mouse input was requested')
done_callback = 'function() { window.__dragActionDone = true; }'
tab.ExecuteJavaScript('''
window.__dragActionDone = false;
window.__dragAction = new __DragAction(%s);'''
% done_callback)
def RunAction(self, tab):
if (self._selector is None and self._text is None and
self._element_function is None):
self._element_function = 'document.body'
gesture_source_type = 'chrome.gpuBenchmarking.TOUCH_INPUT'
if (page_action.IsGestureSourceTypeSupported(tab, 'mouse') and
not self._use_touch):
gesture_source_type = 'chrome.gpuBenchmarking.MOUSE_INPUT'
code = '''
function(element, info) {
if (!element) {
throw Error('Cannot find element: ' + info);
}
window.__dragAction.start({
element: element,
left_start_ratio: %s,
top_start_ratio: %s,
left_end_ratio: %s,
top_end_ratio: %s,
speed: %s,
gesture_source_type: %s
});
}''' % (self._left_start_ratio,
self._top_start_ratio,
self._left_end_ratio,
self._top_end_ratio,
self._speed,
gesture_source_type)
page_action.EvaluateCallbackWithElement(
tab, code, selector=self._selector, text=self._text,
element_function=self._element_function)
tab.WaitForJavaScriptExpression('window.__dragActionDone', 60)
| bsd-3-clause |
mega-force/xbmc | lib/gtest/test/gtest_shuffle_test.py | 184 | 12608 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
TEST_FILTER = 'A*.A:A*.B:C*'
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []
SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
return '--gtest_also_run_disabled_tests'
def FilterFlag(test_filter):
return '--gtest_filter=%s' % (test_filter,)
def RepeatFlag(n):
return '--gtest_repeat=%s' % (n,)
def ShuffleFlag():
return '--gtest_shuffle'
def RandomSeedFlag(n):
return '--gtest_random_seed=%s' % (n,)
def RunAndReturnOutput(extra_env, args):
"""Runs the test program and returns its output."""
environ_copy = os.environ.copy()
environ_copy.update(extra_env)
return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy,
capture_stderr=False).output
def GetTestsForAllIterations(extra_env, args):
"""Runs the test program and returns a list of test lists.
Args:
extra_env: a map from environment variables to their values
args: command line flags to pass to gtest_shuffle_test_
Returns:
A list where the i-th element is the list of tests run in the i-th
test iteration.
"""
test_iterations = []
for line in RunAndReturnOutput(extra_env, args).split('\n'):
if line.startswith('----'):
tests = []
test_iterations.append(tests)
elif line.strip():
tests.append(line.strip()) # 'TestCaseName.TestName'
return test_iterations
def GetTestCases(tests):
"""Returns a list of test cases in the given full test names.
Args:
tests: a list of full test names
Returns:
A list of test cases from 'tests', in their original order.
Consecutive duplicates are removed.
"""
test_cases = []
for test in tests:
test_case = test.split('.')[0]
if not test_case in test_cases:
test_cases.append(test_case)
return test_cases
def CalculateTestLists():
"""Calculates the list of tests run under different flags."""
if not ALL_TESTS:
ALL_TESTS.extend(
GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])
if not ACTIVE_TESTS:
ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])
if not FILTERED_TESTS:
FILTERED_TESTS.extend(
GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])
if not SHARDED_TESTS:
SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[])[0])
if not SHUFFLED_ALL_TESTS:
SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
{}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_ACTIVE_TESTS:
SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])[0])
if not SHUFFLED_FILTERED_TESTS:
SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])
if not SHUFFLED_SHARDED_TESTS:
SHUFFLED_SHARDED_TESTS.extend(
GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
"""Tests test shuffling."""
def setUp(self):
CalculateTestLists()
def testShufflePreservesNumberOfTests(self):
self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))
def testShuffleChangesTestOrder(self):
self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
SHUFFLED_FILTERED_TESTS)
self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
SHUFFLED_SHARDED_TESTS)
def testShuffleChangesTestCaseOrder(self):
self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
GetTestCases(SHUFFLED_ALL_TESTS))
self.assert_(
GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
GetTestCases(SHUFFLED_ACTIVE_TESTS))
self.assert_(
GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
GetTestCases(SHUFFLED_FILTERED_TESTS))
self.assert_(
GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
GetTestCases(SHUFFLED_SHARDED_TESTS))
def testShuffleDoesNotRepeatTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
'%s appears more than once' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
'%s appears more than once' % (test,))
def testShuffleDoesNotCreateNewTest(self):
for test in SHUFFLED_ALL_TESTS:
self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_ACTIVE_TESTS:
self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_FILTERED_TESTS:
self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
for test in SHUFFLED_SHARDED_TESTS:
self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))
def testShuffleIncludesAllTests(self):
for test in ALL_TESTS:
self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
for test in ACTIVE_TESTS:
self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
for test in FILTERED_TESTS:
self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
for test in SHARDED_TESTS:
self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))
def testShuffleLeavesDeathTestsAtFront(self):
non_death_test_found = False
for test in SHUFFLED_ACTIVE_TESTS:
if 'DeathTest.' in test:
self.assert_(not non_death_test_found,
'%s appears after a non-death test' % (test,))
else:
non_death_test_found = True
def _VerifyTestCasesDoNotInterleave(self, tests):
test_cases = []
for test in tests:
[test_case, _] = test.split('.')
if test_cases and test_cases[-1] != test_case:
test_cases.append(test_case)
self.assertEqual(1, test_cases.count(test_case),
'Test case %s is not grouped together in %s' %
(test_case, tests))
def testShuffleDoesNotInterleaveTestCases(self):
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)
def testShuffleRestoresOrderAfterEachIteration(self):
# Get the test lists in all 3 iterations, using random seed 1, 2,
# and 3 respectively. Google Test picks a different seed in each
# iteration, and this test depends on the current implementation
# picking successive numbers. This dependency is not ideal, but
# makes the test much easier to write.
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
# Make sure running the tests with random seed 1 gets the same
# order as in iteration 1 above.
[tests_with_seed1] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1)])
self.assertEqual(tests_in_iteration1, tests_with_seed1)
# Make sure running the tests with random seed 2 gets the same
# order as in iteration 2 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 2.
[tests_with_seed2] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(2)])
self.assertEqual(tests_in_iteration2, tests_with_seed2)
# Make sure running the tests with random seed 3 gets the same
# order as in iteration 3 above. Success means that Google Test
# correctly restores the test order before re-shuffling at the
# beginning of iteration 3.
[tests_with_seed3] = GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(3)])
self.assertEqual(tests_in_iteration3, tests_with_seed3)
def testShuffleGeneratesNewOrderInEachIteration(self):
[tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
GetTestsForAllIterations(
{}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))
self.assert_(tests_in_iteration1 != tests_in_iteration2,
tests_in_iteration1)
self.assert_(tests_in_iteration1 != tests_in_iteration3,
tests_in_iteration1)
self.assert_(tests_in_iteration2 != tests_in_iteration3,
tests_in_iteration2)
def testShuffleShardedTestsPreservesPartition(self):
# If we run M tests on N shards, the same M tests should be run in
# total, regardless of the random seeds used by the shards.
[tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '0'},
[ShuffleFlag(), RandomSeedFlag(1)])
[tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '1'},
[ShuffleFlag(), RandomSeedFlag(20)])
[tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
SHARD_INDEX_ENV_VAR: '2'},
[ShuffleFlag(), RandomSeedFlag(25)])
sorted_sharded_tests = tests1 + tests2 + tests3
sorted_sharded_tests.sort()
sorted_active_tests = []
sorted_active_tests.extend(ACTIVE_TESTS)
sorted_active_tests.sort()
self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
aurelijusb/arangodb | 3rdParty/V8-4.3.61/testing/gtest/scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| apache-2.0 |
ric2b/Vivaldi-browser | chromium/net/data/parse_certificate_unittest/regenerate_pem_from_ascii.py | 11 | 3123 | #!/usr/bin/python
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Given a path to a XXX.pem file, re-generates a CERTIFICATE.
The .pem file is expected to contain comments that resemble:
#-----BEGIN XXX-----
<ascii-der values in here>
#-----END XXX-----
These are interpreted as substitutions to make inside of the Certificate
template (v3_certificate_template.txt)
"""
import sys
import os
import re
import base64
import subprocess
def read_file_to_string(path):
"""Reads a file entirely to a string"""
with open(path, 'r') as f:
return f.read()
def write_string_to_file(data, path):
"""Writes a string to a file"""
print "Writing file %s ..." % (path)
with open(path, "w") as f:
f.write(data)
def replace_string(original, start, end, replacement):
"""Replaces the specified range of |original| with |replacement|"""
return original[0:start] + replacement + original[end:]
def apply_substitution(template, name, value):
"""Finds a section named |name| in |template| and replaces it with |value|."""
# Find the section |name| in |template|.
regex = re.compile(r'#-----BEGIN %s-----(.*?)#-----END %s-----' %
(re.escape(name), re.escape(name)), re.DOTALL)
m = regex.search(template)
if not m:
print "Couldn't find a section named %s in the template" % (name)
sys.exit(1)
return replace_string(template, m.start(1), m.end(1), value)
def main():
if len(sys.argv) != 2:
print 'Usage: %s <PATH_TO_PEM>' % (sys.argv[0])
sys.exit(1)
pem_path = sys.argv[1]
orig = read_file_to_string(pem_path)
cert_ascii = read_file_to_string("v3_certificate_template.txt")
# Apply all substitutions described by comments in |orig|
regex = re.compile(r'#-----BEGIN ([\w ]+)-----(.*?)#-----END \1-----',
re.DOTALL)
num_matches = 0
for m in regex.finditer(orig):
num_matches += 1
cert_ascii = apply_substitution(cert_ascii, m.group(1), m.group(2))
if num_matches == 0:
print "Input did not contain any substitutions"
sys.exit(1)
# Convert the ascii-der to actual DER binary.
cert_der = None
try:
p = subprocess.Popen(['ascii2der'], stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
cert_der = p.communicate(input=cert_ascii)[0]
except OSError as e:
print ('ERROR: Failed executing ascii2der.\n'
'Make sure this is in your path\n'
'Obtain it from https://github.com/google/der-ascii')
sys.exit(1)
# Replace the CERTIFICATE block with the newly generated one.
regex = re.compile(r'-----BEGIN CERTIFICATE-----\n(.*?)\n'
'-----END CERTIFICATE-----', re.DOTALL)
m = regex.search(orig)
if not m:
print "ERROR: Cannot find CERTIFICATE block in input"
sys.exit(1)
modified = replace_string(orig, m.start(1), m.end(1),
base64.b64encode(cert_der))
# Write back the .pem file.
write_string_to_file(modified, pem_path)
main()
| bsd-3-clause |
damdam-s/OCB | addons/account/project/project.py | 273 | 2423 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
_name = 'account.analytic.journal'
_description = 'Analytic Journal'
_columns = {
'name': fields.char('Journal Name', required=True),
'code': fields.char('Journal Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', required=True, help="Gives the type of the analytic journal. When it needs for a document (eg: an invoice) to create analytic entries, Odoo will look for a matching journal of the same type."),
'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines', copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': True,
'type': 'general',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
class account_journal(osv.osv):
_inherit="account.journal"
_columns = {
'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NL66278/OCB | addons/base_action_rule/__init__.py | 438 | 1098 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_action_rule
import test_models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cfe-lab/MiCall | micall/utils/scan_run_folders.py | 1 | 4902 | import os
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import Counter, defaultdict
from csv import DictWriter
from pathlib import Path
from tempfile import NamedTemporaryFile
from zipfile import ZipFile, ZIP_DEFLATED
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
def parse_args():
parser = ArgumentParser(description='Scan run folders and report disk usage.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('run_sizes_csv',
nargs='?',
default='run_sizes.csv',
type=Path,
help='CSV file to write the size of each run in MB')
parser.add_argument('--runs',
default='/media/raw_data/MiSeq/runs',
type=Path,
help='path to MiSeq run folders')
parser.add_argument('--group_size',
default=50,
type=int,
help='scan the first run from each group')
return parser.parse_args()
def format_size(size):
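    # Convert a byte count to whole megabytes, rounded to the nearest MB,
    # and return it as a string for the CSV.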
return f'{int(size/1024/1024+0.5)}'
def zip_factory():
return ZipFile(NamedTemporaryFile(), 'w', ZIP_DEFLATED)
def scan_run_folders(runs_folder: Path, run_sizes_csv, group_size: int):
writer = DictWriter(run_sizes_csv, ['run', 'version', 'fastqs', 'outputs', 'zipped'])
writer.writeheader()
run_folders = sorted(run_folder
for run_folder in runs_folder.iterdir()
if run_folder.is_dir())
chosen_folders = (run_folder
for i, run_folder in enumerate(run_folders)
if i % group_size == 0)
for run_folder in chosen_folders:
print(run_folder)
data_size = 0
version_sizes = Counter()
version_zips = defaultdict(zip_factory)
results_path = run_folder / 'Results'
base_calls_path = str(run_folder / 'Data' / 'Intensities' / 'BaseCalls')
# noinspection PyTypeChecker
for dirpath, dirnames, filenames in os.walk(run_folder):
dirpath = Path(dirpath)
for file_name in filenames:
if file_name.endswith('.DS_Store'):
continue
file_path = dirpath / file_name
if (file_name.endswith('.fastq.gz') and
str(file_path).startswith(base_calls_path)):
data_size += file_path.stat().st_size
else:
try:
rel_path = file_path.relative_to(results_path)
parents = list(rel_path.parents)
if len(parents) >= 2:
version = parents[-2]
entry_path = rel_path.relative_to(version)
else:
entry_path = version = Path(file_name)
version_sizes[version] += file_path.stat().st_size
version_zip: ZipFile = version_zips[version]
version_zip.write(file_path, entry_path)
except ValueError:
pass
if not version_sizes:
version_sizes[None] = 0
for version, size in sorted(version_sizes.items()):
version_zip = version_zips[version]
raw_file = version_zip.fp
version_zip.close() # Writes index.
zip_size = raw_file.tell()
raw_file.close()
writer.writerow(dict(run=run_folder.name,
version=version,
fastqs=format_size(data_size),
outputs=format_size(size),
zipped=format_size(zip_size)))
run_sizes_csv.flush()
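# A row of the resulting CSV might look like this (values invented for
# illustration; sizes are whole MB as produced by format_size):
#     run,version,fastqs,outputs,zipped
#     140101_M01234_0001_000000000-A1B2C,version_7.9,2048,512,128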
def main():
args = parse_args()
run_sizes_path: Path = args.run_sizes_csv
if not run_sizes_path.exists():
with run_sizes_path.open('w') as run_sizes_csv:
scan_run_folders(args.runs, run_sizes_csv, args.group_size)
runs = pd.read_csv(run_sizes_path)
runs['version'] = runs['version'].str.replace('version_', 'v')
runs['version'] = runs['version'].str.replace('-UPDATED-BOWTIE', '')
runs['version'] = runs['version'].str.replace('RC1', 'r1')
runs.sort_values('version', inplace=True)
plain_runs = runs.copy()
plain_runs['size'] = plain_runs['outputs']
plain_runs['type'] = 'unzipped'
zipped_runs = runs.copy()
zipped_runs['size'] = zipped_runs['zipped']
zipped_runs['type'] = 'zipped'
all_runs = pd.concat([plain_runs, zipped_runs])
sns.boxplot(x='size', y='version', hue='type', data=all_runs)
plt.xlabel('Output size (MB)')
plt.title('MiSeq Disk Usage')
plt.show()
main()
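# Note that main() runs at import time here and that an existing run_sizes.csv
# acts as a cache: the slow folder scan is skipped and only the seaborn box
# plot is redrawn from the saved rows; delete the CSV to force a rescan.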
| agpl-3.0 |
asser/django | tests/multiple_database/tests.py | 11 | 94112 | from __future__ import unicode_literals
import datetime
import pickle
from operator import attrgetter
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.models import signals
from django.db.utils import ConnectionRouter
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.six import StringIO
from .models import Book, Person, Pet, Review, UserProfile
from .routers import AuthRouter, TestRouter, WriteRouter
class QueryTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets will use the default database by default"
self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.using('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').all().db, 'other')
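# using() picks the database for a single queryset, while db_manager() rebinds
# the manager itself so manager-level helpers also hit that database, e.g.
# (illustrative, mirroring the assertions above):
#     Book.objects.using('other').filter(title='Pro Django')   # queryset on 'other'
#     Book.objects.db_manager('other').create(title='Pro Django',
#                                             published=datetime.date(2008, 12, 16))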
def test_default_creation(self):
"Objects created on the default database don't leak onto other databases"
# Create a book on the default database using create()
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save()
# Check that book exists on the default database, but not on other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('other').get(title="Pro Django")
try:
Book.objects.get(title="Dive into Python")
Book.objects.using('default').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on default database')
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('other').get(title="Dive into Python")
def test_other_creation(self):
"Objects created on another database don't leak onto the default database"
# Create a book on the second database
Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
# Check that the book exists on the other database, but not on the default database
try:
Book.objects.using('other').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on other database')
with self.assertRaises(Book.DoesNotExist):
Book.objects.get(title="Pro Django")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('default').get(title="Pro Django")
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
with self.assertRaises(Book.DoesNotExist):
Book.objects.get(title="Dive into Python")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('default').get(title="Dive into Python")
def test_refresh(self):
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
dive2 = Book.objects.using('other').get()
dive2.title = "Dive into Python (on default)"
dive2.save(using='default')
dive.refresh_from_db()
self.assertEqual(dive.title, "Dive into Python")
dive.refresh_from_db(using='default')
self.assertEqual(dive.title, "Dive into Python (on default)")
self.assertEqual(dive._state.db, "default")
def test_basic_queries(self):
"Queries are constrained to a single database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
self.assertEqual(dive.title, "Dive into Python")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('default').get(published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(title__icontains="dive")
self.assertEqual(dive.title, "Dive into Python")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('default').get(title__icontains="dive")
dive = Book.objects.using('other').get(title__iexact="dive INTO python")
self.assertEqual(dive.title, "Dive into Python")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('default').get(title__iexact="dive INTO python")
dive = Book.objects.using('other').get(published__year=2009)
self.assertEqual(dive.title, "Dive into Python")
self.assertEqual(dive.published, datetime.date(2009, 5, 4))
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('default').get(published__year=2009)
years = Book.objects.using('other').dates('published', 'year')
self.assertEqual([o.year for o in years], [2009])
years = Book.objects.using('default').dates('published', 'year')
self.assertEqual([o.year for o in years], [])
months = Book.objects.using('other').dates('published', 'month')
self.assertEqual([o.month for o in months], [5])
months = Book.objects.using('default').dates('published', 'month')
self.assertEqual([o.month for o in months], [])
def test_m2m_separation(self):
"M2M fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
pro.authors.set([marty])
dive.authors.set([mark])
# Inspect the m2m tables directly.
# There should be 1 entry in each database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Check that queries work across m2m joins
self.assertEqual(
list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
['Pro Django']
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python']
)
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
mark = Person.objects.using('other').get(name="Mark Pilgrim")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.authors.all().values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)),
['Dive into Python'])
def test_m2m_forward_operations(self):
"M2M forward manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors.set([mark])
# Add a second author
john = Person.objects.using('other').create(name="John Smith")
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[]
)
dive.authors.add(john)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python']
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
['Dive into Python']
)
# Remove the second author
dive.authors.remove(john)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python']
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[]
)
# Clear all authors
dive.authors.clear()
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[]
)
# Create an author through the m2m interface
dive.authors.create(name='Jane Brown')
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[]
)
self.assertEqual(
list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
['Dive into Python']
)
def test_m2m_reverse_operations(self):
"M2M reverse manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors.set([mark])
# Create a second book on the other database
grease = Book.objects.using('other').create(title="Greasemonkey Hacks",
published=datetime.date(2005, 11, 1))
# Add a book to the m2m
mark.book_set.add(grease)
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim']
)
self.assertEqual(
list(
Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
),
['Mark Pilgrim']
)
# Remove a book from the m2m
mark.book_set.remove(grease)
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim']
)
self.assertEqual(
list(
Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
),
[]
)
# Clear the books associated with mark
mark.book_set.clear()
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)
),
[]
)
# Create a book through the m2m interface
mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
['Mark Pilgrim']
)
def test_m2m_cross_database_protection(self):
"Operations that involve sharing M2M objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited.set([pro, dive])
# Add to an m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set.add(dive)
# Set a m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set.set([pro, dive])
# Add to a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors.add(marty)
# Set a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors.set([mark, marty])
def test_m2m_deletion(self):
"Cascaded deletions of m2m relations issue queries on the right database"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
dive.authors.set([mark])
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
dive.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person still exists ...
self.assertEqual(Person.objects.using('other').count(), 1)
# ... but the book has been deleted
self.assertEqual(Book.objects.using('other').count(), 0)
# ... and the relationship object has also been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Now try deletion in the reverse direction. Set up the relation again
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive.authors.set([mark])
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person has been deleted ...
self.assertEqual(Person.objects.using('other').count(), 0)
# ... but the book still exists
self.assertEqual(Book.objects.using('other').count(), 1)
# ... and the relationship object has been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
def test_foreign_key_separation(self):
"FK fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
george = Person.objects.create(name="George Vilches")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
# Assign an editor to each book
pro.editor = george
pro.save()
dive.editor = chris
dive.save()
pro = Book.objects.using('default').get(title="Pro Django")
self.assertEqual(pro.editor.name, "George Vilches")
dive = Book.objects.using('other').get(title="Dive into Python")
self.assertEqual(dive.editor.name, "Chris Mills")
# Check that queries work across foreign key joins
self.assertEqual(
list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
['George Vilches']
)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
['Chris Mills']
)
# Reget the objects to clear caches
chris = Person.objects.using('other').get(name="Chris Mills")
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(chris.edited.values_list('title', flat=True)),
['Dive into Python'])
def test_foreign_key_reverse_operations(self):
"FK reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
# Save the author relations
dive.editor = chris
dive.save()
# Add a second book edited by chris
html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
chris.edited.add(html5)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
['Chris Mills']
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
['Chris Mills']
)
# Remove the second edited book
chris.edited.remove(html5)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
['Chris Mills']
)
# Clear all edited books
chris.edited.clear()
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
[]
)
# Create a book through the reverse FK interface
chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[]
)
self.assertEqual(
list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
['Chris Mills']
)
self.assertEqual(
list(
Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)
),
[]
)
def test_foreign_key_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
dive.editor = marty
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited.set([pro, dive])
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited.add(dive)
def test_foreign_key_deletion(self):
"Cascaded deletions of Foreign Key relations issue queries on the right database"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Pet.objects.using('other').create(name="Fido", owner=mark)
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Pet.objects.using('other').count(), 1)
# Delete the person object, which will cascade onto the pet
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
# Both the pet and the person have been deleted from the right database
self.assertEqual(Person.objects.using('other').count(), 0)
self.assertEqual(Pet.objects.using('other').count(), 0)
def test_foreign_key_validation(self):
"ForeignKey.validate() uses the correct database"
mickey = Person.objects.using('other').create(name="Mickey")
pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey)
self.assertIsNone(pluto.full_clean())
def test_o2o_separation(self):
"OneToOne fields are constrained to a single database"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', '[email protected]')
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', '[email protected]')
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
# Retrieve related objects; queries should be database constrained
alice = User.objects.using('default').get(username="alice")
self.assertEqual(alice.userprofile.flavor, "chocolate")
bob = User.objects.using('other').get(username="bob")
self.assertEqual(bob.userprofile.flavor, "crunchy frog")
# Check that queries work across joins
self.assertEqual(
list(
User.objects.using('default')
.filter(userprofile__flavor='chocolate').values_list('username', flat=True)
),
['alice']
)
self.assertEqual(
list(
User.objects.using('other')
.filter(userprofile__flavor='chocolate').values_list('username', flat=True)
),
[]
)
self.assertEqual(
list(
User.objects.using('default')
.filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
),
[]
)
self.assertEqual(
list(
User.objects.using('other')
.filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)
),
['bob']
)
# Reget the objects to clear caches
alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(alice_profile.user.username, 'alice')
self.assertEqual(bob_profile.user.username, 'bob')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', '[email protected]')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', '[email protected]')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
with self.assertRaises(ValueError):
bob.userprofile = alice_profile
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
new_bob_profile = UserProfile(flavor="spring surprise")
# assigning a profile requires an explicit pk as the object isn't saved
charlie = User(pk=51, username='charlie', email='[email protected]')
charlie.set_unusable_password()
# initially, no db assigned
self.assertEqual(new_bob_profile._state.db, None)
self.assertEqual(charlie._state.db, None)
# old object comes from 'other', so the new object is set to use 'other'...
new_bob_profile.user = bob
charlie.userprofile = bob_profile
self.assertEqual(new_bob_profile._state.db, 'other')
self.assertEqual(charlie._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog'])
# When saved (no using required), the new objects go to 'other'
charlie.save()
bob_profile.save()
new_bob_profile.save()
self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)),
['alice'])
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob', 'charlie'])
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# This also works if you assign the O2O relation in the constructor
denise = User.objects.db_manager('other').create_user('denise', '[email protected]')
denise_profile = UserProfile(flavor="tofu", user=denise)
self.assertEqual(denise_profile._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# When saved, the new profile goes to 'other'
denise_profile.save()
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise', 'tofu'])
def test_generic_key_separation(self):
"Generic fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review1 = Review.objects.using('default').get(source="Python Monthly")
self.assertEqual(review1.content_object.title, "Pro Django")
review2 = Review.objects.using('other').get(source="Python Weekly")
self.assertEqual(review2.content_object.title, "Dive into Python")
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)),
['Python Weekly'])
def test_generic_key_reverse_operations(self):
"Generic reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
temp = Book.objects.using('other').create(title="Temp",
published=datetime.date(2009, 5, 4))
review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly']
)
# Add a second review
dive.reviews.add(review2)
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly', 'Python Weekly']
)
# Remove the first review
dive.reviews.remove(review1)
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly']
)
# Clear all reviews
dive.reviews.clear()
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
# Create a review through the generic interface
dive.reviews.create(source='Python Daily')
self.assertEqual(
list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[]
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily']
)
def test_generic_key_cross_database_protection(self):
"Operations that involve sharing generic key objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Set a generic foreign key with an object from a different database
with self.assertRaises(ValueError):
review1.content_object = dive
# Add to a generic foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.reviews.add(review1)
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(
list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly']
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly']
)
# When saved, review3 goes to 'other'
review3.save()
self.assertEqual(
list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly']
)
self.assertEqual(
list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily', 'Python Weekly']
)
def test_generic_key_deletion(self):
"Cascaded deletions of Generic Key relations issue queries on the right database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Check the initial state
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Review.objects.using('other').count(), 1)
# Delete the Book object, which will cascade onto the review
dive.delete(using='other')
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
# Both the book and the review have been deleted from the right database
self.assertEqual(Book.objects.using('other').count(), 0)
self.assertEqual(Review.objects.using('other').count(), 0)
def test_ordering(self):
"get_next_by_XXX commands stick to a single database"
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
learn = Book.objects.using('other').create(title="Learning Python",
published=datetime.date(2008, 7, 16))
self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
self.assertEqual(dive.get_previous_by_published().title, "Learning Python")
def test_raw(self):
"test the raw() method across databases"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
def test_select_related(self):
"Database assignment is retained if an object is retrieved with select_related()"
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
# Retrieve the Person using select_related()
book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")
# The editor instance should have a db state
self.assertEqual(book.editor._state.db, 'other')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
sub = Person.objects.using('other').filter(name='fff')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. If the subquery explicitly uses a
# different database, an error should be raised.
with self.assertRaises(ValueError):
str(qs.query)
# Evaluating the query shouldn't work, either
with self.assertRaises(ValueError):
for obj in qs:
pass
def test_related_manager(self):
"Related managers return managers, not querysets"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# extra_arg is removed by the BookManager's implementation of
# create(); but the BookManager's implementation won't get called
# unless edited returns a Manager, not a queryset
mark.book_set.create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.book_set.get_or_create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.get_or_create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
class ConnectionRouterTestCase(SimpleTestCase):
@override_settings(DATABASE_ROUTERS=[
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'])
def test_router_init_default(self):
connection_router = ConnectionRouter()
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
def test_router_init_arg(self):
connection_router = ConnectionRouter([
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'
])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
# Init with instances instead of strings
connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
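# A minimal sketch of the kind of primary/replica router these tests rely on.
# The real TestRouter, AuthRouter and WriteRouter are imported from ./routers.py
# and are not shown here; this hypothetical class is only for illustration.
class ReplicaRouterSketch(object):
    def db_for_read(self, model, **hints):
        return 'other'  # send reads to the replica
    def db_for_write(self, model, **hints):
        return 'default'  # send writes to the primary
    def allow_relation(self, obj1, obj2, **hints):
        return True  # the two databases hold the same data
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        return True  # migrate every model on every database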
# Make the 'other' database appear to be a replica of the 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class RouterTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets obey the router for db suggestions"
self.assertEqual(Book.objects.db, 'other')
self.assertEqual(Book.objects.all().db, 'other')
self.assertEqual(Book.objects.using('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').all().db, 'default')
def test_migrate_selection(self):
"Synchronization behavior is predictable"
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]):
# Add the auth router to the chain. TestRouter is a universal
# synchronizer, so it should have no effect.
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]):
# Now check what happens if the router order is reversed.
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
def test_partial_router(self):
"A router can choose to implement a subset of methods"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# First check the baseline behavior.
self.assertEqual(router.db_for_read(User), 'other')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'default')
self.assertEqual(router.db_for_write(Book), 'default')
self.assertTrue(router.allow_relation(dive, dive))
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]):
self.assertEqual(router.db_for_read(User), 'default')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'writer')
self.assertEqual(router.db_for_write(Book), 'writer')
self.assertTrue(router.allow_relation(dive, dive))
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
def test_database_routing(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
pro.authors.set([marty])
# Create a book and author on the other database
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# An update query will be routed to the default database
Book.objects.filter(title='Pro Django').update(pages=200)
with self.assertRaises(Book.DoesNotExist):
# By default, the get query will be directed to 'other'
Book.objects.get(title='Pro Django')
# But the same query issued explicitly at a database will work.
pro = Book.objects.using('default').get(title='Pro Django')
# Check that the update worked.
self.assertEqual(pro.pages, 200)
# An update query with an explicit using clause will be routed
# to the requested database.
Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)
# Related object queries stick to the same database
# as the original object, regardless of the router
self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin'])
self.assertEqual(pro.editor.name, 'Marty Alchin')
# get_or_create is a special case. The get needs to be targeted at
# the write database in order to avoid potential transaction
# consistency problems
book, created = Book.objects.get_or_create(title="Pro Django")
self.assertFalse(created)
book, created = Book.objects.get_or_create(title="Dive Into Python",
defaults={'published': datetime.date(2009, 5, 4)})
self.assertTrue(created)
# Check the head count of objects
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 1)
# If a database isn't specified, the read database is used
self.assertEqual(Book.objects.count(), 1)
# A delete query will also be routed to the default database
Book.objects.filter(pages__gt=150).delete()
# The default database has lost the book.
self.assertEqual(Book.objects.using('default').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
def test_invalid_set_foreign_key_assignment(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
dive = Book.objects.using('other').create(
title="Dive into Python",
published=datetime.date(2009, 5, 4),
)
# Set a foreign key set with an object from a different database
msg = "<Book: Dive into Python> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
marty.edited.set([dive])
def test_foreign_key_cross_database_protection(self):
"Foreign keys can cross databases if they two databases have a common source"
# Create a book and author on the default database
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('default').create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key with an object from a different database
try:
dive.editor = marty
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Set a foreign key set with an object from a different database
try:
marty.edited.set([pro, dive], bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Assignment implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a foreign key set with an object from a different database
try:
marty.edited.add(dive, bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Add implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
# If you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
chris = Person(name="Chris Mills")
html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
# initially, no db assigned
self.assertEqual(chris._state.db, None)
self.assertEqual(html5._state.db, None)
# old object comes from 'other', so the new object is set to use the
# source of 'other'...
self.assertEqual(dive._state.db, 'other')
chris.save()
dive.editor = chris
html5.editor = mark
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
self.assertEqual(chris._state.db, 'default')
self.assertEqual(html5._state.db, 'default')
# This also works if you assign the FK in the constructor
water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
self.assertEqual(water._state.db, 'default')
# For the remainder of this test, create a copy of 'mark' in the
# 'default' database to prevent integrity errors on backends that
# don't defer constraints checks until the end of the transaction
mark.save(using='default')
# This moved 'mark' in the 'default' database, move it back in 'other'
mark.save(using='other')
self.assertEqual(mark._state.db, 'other')
# If you create an object through a FK relation, it will be
# written to the write database, even if the original object
# was on the read database
cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
cheesecake, created = mark.edited.get_or_create(
title='Dive into Cheesecake',
published=datetime.date(2010, 3, 15),
)
self.assertEqual(cheesecake._state.db, 'default')
puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15))
self.assertEqual(puddles._state.db, 'default')
def test_m2m_cross_database_protection(self):
"M2M relations can cross databases if the database share a source"
# Create books and authors on the inverse to the usual database
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")
# Now save back onto the usual database.
# This simulates primary/replica - the objects exist on both database,
# but the _state.db is as it is for all other tests.
pro.save(using='default')
marty.save(using='default')
dive.save(using='other')
mark.save(using='other')
# Check that we have 2 of both types of object on both databases
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 2)
self.assertEqual(Person.objects.using('default').count(), 2)
self.assertEqual(Person.objects.using('other').count(), 2)
# Set a m2m set with an object from a different database
try:
marty.book_set.set([pro, dive])
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Add to an m2m with an object from a different database
try:
marty.book_set.add(dive)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Set a reverse m2m with an object from a different database
try:
dive.authors.set([mark, marty])
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Add to a reverse m2m with an object from a different database
try:
dive.authors.add(marty)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# If you create an object through a M2M relation, it will be
# written to the write database, even if the original object
# was on the read database
alice = dive.authors.create(name='Alice')
self.assertEqual(alice._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
alice, created = dive.authors.get_or_create(name='Alice')
self.assertEqual(alice._state.db, 'default')
bob, created = dive.authors.get_or_create(name='Bob')
self.assertEqual(bob._state.db, 'default')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', '[email protected]')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', '[email protected]')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
try:
bob.userprofile = alice_profile
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(alice._state.db, 'default')
self.assertEqual(alice_profile._state.db, 'default')
self.assertEqual(bob._state.db, 'other')
# ... but they will when the affected object is saved.
bob.save()
self.assertEqual(bob._state.db, 'default')
def test_generic_key_cross_database_protection(self):
"Generic Key operations can span databases if they share a source"
# Create a book and author on the default database
pro = Book.objects.using(
'default').create(title="Pro Django", published=datetime.date(2008, 12, 16))
review1 = Review.objects.using(
'default').create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using(
'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
review2 = Review.objects.using(
'other').create(source="Python Weekly", content_object=dive)
# Set a generic foreign key with an object from a different database
try:
review1.content_object = dive
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a generic foreign key set with an object from a different database
try:
dive.reviews.add(review1)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use the source of 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'default')
# If you create an object through a generic relation, it will be
# written to the write database, even if the original object
# was on the read database
dive = Book.objects.using('other').get(title='Dive into Python')
nyt = dive.reviews.create(source="New York Times", content_object=dive)
self.assertEqual(nyt._state.db, 'default')
def test_m2m_managers(self):
"M2M relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
self.assertEqual(pro.authors.db, 'other')
self.assertEqual(pro.authors.db_manager('default').db, 'default')
self.assertEqual(pro.authors.db_manager('default').all().db, 'default')
self.assertEqual(marty.book_set.db, 'other')
self.assertEqual(marty.book_set.db_manager('default').db, 'default')
self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')
def test_foreign_key_managers(self):
"FK reverse relations are represented by managers, and can be controlled like managers"
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
self.assertEqual(marty.edited.db, 'other')
self.assertEqual(marty.edited.db_manager('default').db, 'default')
self.assertEqual(marty.edited.db_manager('default').all().db, 'default')
def test_generic_key_managers(self):
"Generic key relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
Review.objects.using('other').create(source="Python Monthly",
content_object=pro)
self.assertEqual(pro.reviews.db, 'other')
self.assertEqual(pro.reviews.db_manager('default').db, 'default')
self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
sub = Person.objects.filter(name='Mark Pilgrim')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. Don't let routing instructions
# force the subquery to an incompatible database.
str(qs.query)
# If you evaluate the query, it should work, running on 'other'
self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])
def test_deferred_models(self):
mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
orig_b = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark_other)
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
self.assertEqual(b.published, datetime.date(2009, 5, 4))
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
b.editor = mark_def
b.save(using='default')
self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
datetime.date(2009, 5, 4))
@override_settings(DATABASE_ROUTERS=[AuthRouter()])
class AuthTestCase(TestCase):
multi_db = True
def test_auth_manager(self):
"The methods on the auth manager obey database hints"
# Create one user using default allocation policy
User.objects.create_user('alice', '[email protected]')
# Create another user, explicitly specifying the database
User.objects.db_manager('default').create_user('bob', '[email protected]')
        # The first user only exists on the other database
alice = User.objects.using('other').get(username='alice')
self.assertEqual(alice.username, 'alice')
self.assertEqual(alice._state.db, 'other')
with self.assertRaises(User.DoesNotExist):
User.objects.using('default').get(username='alice')
# The second user only exists on the default database
bob = User.objects.using('default').get(username='bob')
self.assertEqual(bob.username, 'bob')
self.assertEqual(bob._state.db, 'default')
with self.assertRaises(User.DoesNotExist):
User.objects.using('other').get(username='bob')
# That is... there is one user on each database
self.assertEqual(User.objects.using('default').count(), 1)
self.assertEqual(User.objects.using('other').count(), 1)
def test_dumpdata(self):
"Check that dumpdata honors allow_migrate restrictions on the router"
User.objects.create_user('alice', '[email protected]')
User.objects.db_manager('default').create_user('bob', '[email protected]')
# Check that dumping the default database doesn't try to include auth
# because allow_migrate prohibits auth on default
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '[]')
# Check that dumping the other database does include auth
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertIn('"email": "[email protected]"', command_output)
class AntiPetRouter(object):
# A router that only expresses an opinion on migrate,
# passing pets to the 'other' database
def allow_migrate(self, db, app_label, model_name=None, **hints):
if db == 'other':
return model_name == 'pet'
else:
return model_name != 'pet'
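# Editor's note: an illustrative sketch of how this router decides, not part of
# the original test suite (the app label below is just this test app's label):
#   AntiPetRouter().allow_migrate('other', 'multiple_database', model_name='pet')    -> True
#   AntiPetRouter().allow_migrate('default', 'multiple_database', model_name='pet')  -> False
#   AntiPetRouter().allow_migrate('default', 'multiple_database', model_name='book') -> True
# so Pet tables are only ever created on 'other', and everything else stays off it.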
class FixtureTestCase(TestCase):
multi_db = True
fixtures = ['multidb-common', 'multidb']
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_fixture_loading(self):
"Multi-db fixtures are loaded correctly"
# Check that "Pro Django" exists on the default database, but not on other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('other').get(title="Pro Django")
# Check that "Dive into Python" exists on the default database, but not on other database
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
with self.assertRaises(Book.DoesNotExist):
Book.objects.get(title="Dive into Python")
with self.assertRaises(Book.DoesNotExist):
Book.objects.using('default').get(title="Dive into Python")
# Check that "Definitive Guide" exists on the both databases
try:
Book.objects.get(title="The Definitive Guide to Django")
Book.objects.using('default').get(title="The Definitive Guide to Django")
Book.objects.using('other').get(title="The Definitive Guide to Django")
except Book.DoesNotExist:
self.fail('"The Definitive Guide to Django" should exist on both databases')
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_pseudo_empty_fixtures(self):
"""
A fixture can contain entries, but lead to nothing in the database;
this shouldn't raise an error (#14068).
"""
new_io = StringIO()
management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
command_output = new_io.getvalue().strip()
# No objects will actually be loaded
self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")
class PickleQuerySetTestCase(TestCase):
multi_db = True
def test_pickling(self):
for db in connections:
Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4))
qs = Book.objects.all()
self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)
class DatabaseReceiver(object):
"""
Used in the tests for the database argument in signals (#13552)
"""
def __call__(self, signal, sender, **kwargs):
self._database = kwargs['using']
class WriteToOtherRouter(object):
"""
A router that sends all writes to the other database.
"""
def db_for_write(self, model, **hints):
return "other"
class SignalTests(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])
def test_database_arg_save_and_delete(self):
"""
Tests that the pre/post_save signal contains the correct database.
(#13552)
"""
# Make some signal receivers
pre_save_receiver = DatabaseReceiver()
post_save_receiver = DatabaseReceiver()
pre_delete_receiver = DatabaseReceiver()
post_delete_receiver = DatabaseReceiver()
# Make model and connect receivers
signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
signals.post_save.connect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
p = Person.objects.create(name='Darth Vader')
# Save and test receivers got calls
p.save()
self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
# Delete, and test
p.delete()
self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
# Save again to a different database
p.save(using="other")
self.assertEqual(pre_save_receiver._database, "other")
self.assertEqual(post_save_receiver._database, "other")
# Delete, and test
p.delete(using="other")
self.assertEqual(pre_delete_receiver._database, "other")
self.assertEqual(post_delete_receiver._database, "other")
signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)
def test_database_arg_m2m(self):
"""
Test that the m2m_changed signal has a correct database arg (#13552)
"""
# Make a receiver
receiver = DatabaseReceiver()
# Connect it
signals.m2m_changed.connect(receiver=receiver)
# Create the models that will be used for the tests
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# Create a copy of the models on the 'other' database to prevent
        # integrity errors on backends that don't defer constraint checks
Book.objects.using('other').create(pk=b.pk, title=b.title,
published=b.published)
Person.objects.using('other').create(pk=p.pk, name=p.name)
# Test addition
b.authors.add(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.add(p)
self.assertEqual(receiver._database, "other")
# Test removal
b.authors.remove(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.remove(p)
self.assertEqual(receiver._database, "other")
# Test addition in reverse
p.book_set.add(b)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
p.book_set.add(b)
self.assertEqual(receiver._database, "other")
# Test clearing
b.authors.clear()
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.clear()
self.assertEqual(receiver._database, "other")
class AttributeErrorRouter(object):
"A router to test the exception handling of ConnectionRouter"
def db_for_read(self, model, **hints):
raise AttributeError
def db_for_write(self, model, **hints):
raise AttributeError
class RouterAttributeErrorTestCase(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])
def test_attribute_error_read(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
with self.override_router():
with self.assertRaises(AttributeError):
Book.objects.get(pk=b.pk)
def test_attribute_error_save(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
with self.override_router():
with self.assertRaises(AttributeError):
dive.save()
def test_attribute_error_delete(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
b.authors.set([p])
b.editor = p
with self.override_router():
with self.assertRaises(AttributeError):
b.delete()
def test_attribute_error_m2m(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
with self.override_router():
with self.assertRaises(AttributeError):
b.authors.set([p])
class ModelMetaRouter(object):
"A router to ensure model arguments are real model classes"
def db_for_write(self, model, **hints):
if not hasattr(model, '_meta'):
raise ValueError
@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
class RouterModelArgumentTestCase(TestCase):
multi_db = True
def test_m2m_collection(self):
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# test add
b.authors.add(p)
# test remove
b.authors.remove(p)
# test clear
b.authors.clear()
# test setattr
b.authors.set([p])
# test M2M collection
b.delete()
def test_foreignkey_collection(self):
person = Person.objects.create(name='Bob')
Pet.objects.create(owner=person, name='Wart')
# test related FK collection
person.delete()
class SyncOnlyDefaultDatabaseRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == DEFAULT_DB_ALIAS
class MigrateTestCase(TestCase):
available_apps = [
'multiple_database',
'django.contrib.auth',
'django.contrib.contenttypes'
]
multi_db = True
def test_migrate_to_other_database(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
count = cts.count()
self.assertGreater(count, 0)
cts.delete()
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), count)
def test_migrate_to_other_database_with_router(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
cts.delete()
with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), 0)
class RouterUsed(Exception):
WRITE = 'write'
def __init__(self, mode, model, hints):
self.mode = mode
self.model = model
self.hints = hints
class RouteForWriteTestCase(TestCase):
multi_db = True
class WriteCheckRouter(object):
def db_for_write(self, model, **hints):
raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)
def override_router(self):
return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])
def test_fk_delete(self):
owner = Person.objects.create(name='Someone')
pet = Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
pet.owner.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_delete(self):
owner = Person.objects.create(name='Someone')
to_del_qs = owner.pet_set.all()
try:
with self.override_router():
to_del_qs.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_get_or_create(self):
owner = Person.objects.create(name='Someone')
try:
with self.override_router():
owner.pet_set.get_or_create(name='fido')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_update(self):
owner = Person.objects.create(name='Someone')
Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
owner.pet_set.update(name='max')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.add(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_get_or_create(self):
Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.get_or_create(name='Someone else')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.remove(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().update(name='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_reverse_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.add(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_get_or_create(self):
auth = Person.objects.create(name='Someone')
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.remove(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().update(title='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
| bsd-3-clause |
riptano/cassandra-dtest | tools/files.py | 5 | 1263 | import fileinput
import os
import re
import sys
import tempfile
from dtest import debug # Depending on dtest is not good long-term.
def replace_in_file(filepath, search_replacements):
"""
In-place file search and replace.
filepath - The path of the file to edit
search_replacements - a list of tuples (regex, replacement) that
represent however many search and replace operations you wish to
perform.
Note: This does not work with multi-line regexes.
"""
for line in fileinput.input(filepath, inplace=True):
for regex, replacement in search_replacements:
line = re.sub(regex, replacement, line)
sys.stdout.write(line)
def safe_mkdtemp():
tmpdir = tempfile.mkdtemp()
# \ on Windows is interpreted as an escape character and doesn't do anyone any favors
return tmpdir.replace('\\', '/')
def size_of_files_in_dir(dir_name, verbose=True):
"""
Return the size of all files found in a non-recursive ls of the argument.
Based on http://stackoverflow.com/a/1392549
"""
files = [os.path.join(dir_name, f) for f in os.listdir(dir_name)]
if verbose:
debug('getting sizes of these files: {}'.format(files))
return sum(os.path.getsize(f) for f in files)
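# Editor's sketch (not part of the original module): a minimal, self-contained
# usage example for safe_mkdtemp(), replace_in_file() and size_of_files_in_dir().
# The file name and settings below are hypothetical stand-ins; any text file works.
if __name__ == '__main__':
    demo_dir = safe_mkdtemp()
    demo_conf = os.path.join(demo_dir, 'example.yaml')
    with open(demo_conf, 'w') as f:
        f.write('num_tokens: 1\nhinted_handoff_enabled: true\n')
    # One (regex, replacement) pair per setting we want to flip in place.
    replace_in_file(demo_conf, [
        (r'num_tokens:.*', 'num_tokens: 256'),
        (r'hinted_handoff_enabled:.*', 'hinted_handoff_enabled: false'),
    ])
    print(size_of_files_in_dir(demo_dir, verbose=False))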
| apache-2.0 |
Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/test/test_ssl.py | 3 | 23583 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted SSL support.
"""
from __future__ import division, absolute_import
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from twisted.internet import protocol, reactor, interfaces, defer
from twisted.internet.error import ConnectionDone
from twisted.protocols import basic
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
from twisted.test.test_tcp import ProperlyCloseFilesMixin
import os, errno
try:
from OpenSSL import SSL, crypto
from twisted.internet import ssl
from twisted.test.ssl_helpers import ClientTLSContext, certPath
except ImportError:
def _noSSL():
# ugh, make pyflakes happy.
global SSL
global ssl
SSL = ssl = None
_noSSL()
try:
from twisted.protocols import tls as newTLS
except ImportError:
# Assuming SSL exists, we're using old version in reactor (i.e. non-protocol)
newTLS = None
class UnintelligentProtocol(basic.LineReceiver):
"""
@ivar deferred: a deferred that will fire at connection lost.
@type deferred: L{defer.Deferred}
@cvar pretext: text sent before TLS is set up.
@type pretext: C{bytes}
@cvar posttext: text sent after TLS is set up.
@type posttext: C{bytes}
"""
pretext = [
b"first line",
b"last thing before tls starts",
b"STARTTLS"]
posttext = [
b"first thing after tls started",
b"last thing ever"]
def __init__(self):
self.deferred = defer.Deferred()
def connectionMade(self):
for l in self.pretext:
self.sendLine(l)
def lineReceived(self, line):
if line == b"READY":
self.transport.startTLS(ClientTLSContext(), self.factory.client)
for l in self.posttext:
self.sendLine(l)
self.transport.loseConnection()
def connectionLost(self, reason):
self.deferred.callback(None)
class LineCollector(basic.LineReceiver):
"""
@ivar deferred: a deferred that will fire at connection lost.
@type deferred: L{defer.Deferred}
    @ivar doTLS: whether the protocol should initiate TLS or not.
@type doTLS: C{bool}
@ivar fillBuffer: if set to True, it will send lots of data once
C{STARTTLS} is received.
@type fillBuffer: C{bool}
"""
def __init__(self, doTLS, fillBuffer=False):
self.doTLS = doTLS
self.fillBuffer = fillBuffer
self.deferred = defer.Deferred()
def connectionMade(self):
self.factory.rawdata = b''
self.factory.lines = []
def lineReceived(self, line):
self.factory.lines.append(line)
if line == b'STARTTLS':
if self.fillBuffer:
for x in range(500):
self.sendLine(b'X' * 1000)
self.sendLine(b'READY')
if self.doTLS:
ctx = ServerTLSContext(
privateKeyFileName=certPath,
certificateFileName=certPath,
)
self.transport.startTLS(ctx, self.factory.server)
else:
self.setRawMode()
def rawDataReceived(self, data):
self.factory.rawdata += data
self.transport.loseConnection()
def connectionLost(self, reason):
self.deferred.callback(None)
class SingleLineServerProtocol(protocol.Protocol):
"""
A protocol that sends a single line of data at C{connectionMade}.
"""
def connectionMade(self):
self.transport.write(b"+OK <some crap>\r\n")
self.transport.getPeerCertificate()
class RecordingClientProtocol(protocol.Protocol):
"""
@ivar deferred: a deferred that will fire with first received content.
@type deferred: L{defer.Deferred}
"""
def __init__(self):
self.deferred = defer.Deferred()
def connectionMade(self):
self.transport.getPeerCertificate()
def dataReceived(self, data):
self.deferred.callback(data)
class ImmediatelyDisconnectingProtocol(protocol.Protocol):
"""
    A protocol that disconnects immediately on connection. It fires the
    C{connectionDisconnected} deferred of its factory on connection lost.
"""
def connectionMade(self):
self.transport.loseConnection()
def connectionLost(self, reason):
self.factory.connectionDisconnected.callback(None)
def generateCertificateObjects(organization, organizationalUnit):
"""
Create a certificate for given C{organization} and C{organizationalUnit}.
@return: a tuple of (key, request, certificate) objects.
"""
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 512)
req = crypto.X509Req()
subject = req.get_subject()
subject.O = organization
subject.OU = organizationalUnit
req.set_pubkey(pkey)
req.sign(pkey, "md5")
# Here comes the actual certificate
cert = crypto.X509()
cert.set_serial_number(1)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60) # Testing certificates need not be long lived
cert.set_issuer(req.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(pkey, "md5")
return pkey, req, cert
def generateCertificateFiles(basename, organization, organizationalUnit):
"""
Create certificate files key, req and cert prefixed by C{basename} for
given C{organization} and C{organizationalUnit}.
"""
pkey, req, cert = generateCertificateObjects(organization, organizationalUnit)
for ext, obj, dumpFunc in [
('key', pkey, crypto.dump_privatekey),
('req', req, crypto.dump_certificate_request),
('cert', cert, crypto.dump_certificate)]:
fName = os.extsep.join((basename, ext)).encode("utf-8")
FilePath(fName).setContent(dumpFunc(crypto.FILETYPE_PEM, obj))
class ContextGeneratingMixin:
"""
Offer methods to create L{ssl.DefaultOpenSSLContextFactory} for both client
and server.
@ivar clientBase: prefix of client certificate files.
@type clientBase: C{str}
@ivar serverBase: prefix of server certificate files.
@type serverBase: C{str}
@ivar clientCtxFactory: a generated context factory to be used in
C{reactor.connectSSL}.
@type clientCtxFactory: L{ssl.DefaultOpenSSLContextFactory}
@ivar serverCtxFactory: a generated context factory to be used in
C{reactor.listenSSL}.
@type serverCtxFactory: L{ssl.DefaultOpenSSLContextFactory}
"""
def makeContextFactory(self, org, orgUnit, *args, **kwArgs):
base = self.mktemp()
generateCertificateFiles(base, org, orgUnit)
serverCtxFactory = ssl.DefaultOpenSSLContextFactory(
os.extsep.join((base, 'key')),
os.extsep.join((base, 'cert')),
*args, **kwArgs)
return base, serverCtxFactory
def setupServerAndClient(self, clientArgs, clientKwArgs, serverArgs,
serverKwArgs):
self.clientBase, self.clientCtxFactory = self.makeContextFactory(
*clientArgs, **clientKwArgs)
self.serverBase, self.serverCtxFactory = self.makeContextFactory(
*serverArgs, **serverKwArgs)
if SSL is not None:
class ServerTLSContext(ssl.DefaultOpenSSLContextFactory):
"""
A context factory with a default method set to L{SSL.TLSv1_METHOD}.
"""
isClient = False
def __init__(self, *args, **kw):
kw['sslmethod'] = SSL.TLSv1_METHOD
ssl.DefaultOpenSSLContextFactory.__init__(self, *args, **kw)
class StolenTCPTests(ProperlyCloseFilesMixin, unittest.TestCase):
"""
For SSL transports, test many of the same things which are tested for
TCP transports.
"""
def createServer(self, address, portNumber, factory):
"""
Create an SSL server with a certificate using L{IReactorSSL.listenSSL}.
"""
cert = ssl.PrivateCertificate.loadPEM(FilePath(certPath).getContent())
contextFactory = cert.options()
return reactor.listenSSL(
portNumber, factory, contextFactory, interface=address)
def connectClient(self, address, portNumber, clientCreator):
"""
Create an SSL client using L{IReactorSSL.connectSSL}.
"""
contextFactory = ssl.CertificateOptions()
return clientCreator.connectSSL(address, portNumber, contextFactory)
def getHandleExceptionType(self):
"""
Return L{SSL.Error} as the expected error type which will be raised by
a write to the L{OpenSSL.SSL.Connection} object after it has been
closed.
"""
return SSL.Error
def getHandleErrorCode(self):
"""
Return the argument L{SSL.Error} will be constructed with for this
case. This is basically just a random OpenSSL implementation detail.
It would be better if this test worked in a way which did not require
this.
"""
# Windows 2000 SP 4 and Windows XP SP 2 give back WSAENOTSOCK for
# SSL.Connection.write for some reason. The twisted.protocols.tls
# implementation of IReactorSSL doesn't suffer from this imprecation,
# though, since it is isolated from the Windows I/O layer (I suppose?).
# If test_properlyCloseFiles waited for the SSL handshake to complete
# and performed an orderly shutdown, then this would probably be a
# little less weird: writing to a shutdown SSL connection has a more
# well-defined failure mode (or at least it should).
# So figure out if twisted.protocols.tls is in use. If it can be
# imported, it should be.
if requireModule('twisted.protocols.tls') is None:
# It isn't available, so we expect WSAENOTSOCK if we're on Windows.
if platform.getType() == 'win32':
return errno.WSAENOTSOCK
# Otherwise, we expect an error about how we tried to write to a
# shutdown connection. This is terribly implementation-specific.
return [('SSL routines', 'SSL_write', 'protocol is shutdown')]
class TLSTests(unittest.TestCase):
"""
Tests for startTLS support.
@ivar fillBuffer: forwarded to L{LineCollector.fillBuffer}
@type fillBuffer: C{bool}
"""
fillBuffer = False
clientProto = None
serverProto = None
def tearDown(self):
if self.clientProto.transport is not None:
self.clientProto.transport.loseConnection()
if self.serverProto.transport is not None:
self.serverProto.transport.loseConnection()
def _runTest(self, clientProto, serverProto, clientIsServer=False):
"""
Helper method to run TLS tests.
@param clientProto: protocol instance attached to the client
connection.
@param serverProto: protocol instance attached to the server
connection.
        @param clientIsServer: flag indicating whether the client should initiate
startTLS instead of server.
@return: a L{defer.Deferred} that will fire when both connections are
lost.
"""
self.clientProto = clientProto
cf = self.clientFactory = protocol.ClientFactory()
cf.protocol = lambda: clientProto
if clientIsServer:
cf.server = False
else:
cf.client = True
self.serverProto = serverProto
sf = self.serverFactory = protocol.ServerFactory()
sf.protocol = lambda: serverProto
if clientIsServer:
sf.client = False
else:
sf.server = True
port = reactor.listenTCP(0, sf, interface="127.0.0.1")
self.addCleanup(port.stopListening)
reactor.connectTCP('127.0.0.1', port.getHost().port, cf)
return defer.gatherResults([clientProto.deferred, serverProto.deferred])
def test_TLS(self):
"""
        Test for server and client startTLS: the client should receive data both
before and after the startTLS.
"""
def check(ignore):
self.assertEqual(
self.serverFactory.lines,
UnintelligentProtocol.pretext + UnintelligentProtocol.posttext
)
d = self._runTest(UnintelligentProtocol(),
LineCollector(True, self.fillBuffer))
return d.addCallback(check)
def test_unTLS(self):
"""
Test for server startTLS not followed by a startTLS in client: the data
received after server startTLS should be received as raw.
"""
def check(ignored):
self.assertEqual(
self.serverFactory.lines,
UnintelligentProtocol.pretext
)
self.assertTrue(self.serverFactory.rawdata,
"No encrypted bytes received")
d = self._runTest(UnintelligentProtocol(),
LineCollector(False, self.fillBuffer))
return d.addCallback(check)
def test_backwardsTLS(self):
"""
Test startTLS first initiated by client.
"""
def check(ignored):
self.assertEqual(
self.clientFactory.lines,
UnintelligentProtocol.pretext + UnintelligentProtocol.posttext
)
d = self._runTest(LineCollector(True, self.fillBuffer),
UnintelligentProtocol(), True)
return d.addCallback(check)
class SpammyTLSTests(TLSTests):
"""
Test TLS features with bytes sitting in the out buffer.
"""
fillBuffer = True
class BufferingTests(unittest.TestCase):
serverProto = None
clientProto = None
def tearDown(self):
if self.serverProto.transport is not None:
self.serverProto.transport.loseConnection()
if self.clientProto.transport is not None:
self.clientProto.transport.loseConnection()
def test_openSSLBuffering(self):
serverProto = self.serverProto = SingleLineServerProtocol()
clientProto = self.clientProto = RecordingClientProtocol()
server = protocol.ServerFactory()
client = self.client = protocol.ClientFactory()
server.protocol = lambda: serverProto
client.protocol = lambda: clientProto
sCTX = ssl.DefaultOpenSSLContextFactory(certPath, certPath)
cCTX = ssl.ClientContextFactory()
port = reactor.listenSSL(0, server, sCTX, interface='127.0.0.1')
self.addCleanup(port.stopListening)
reactor.connectSSL('127.0.0.1', port.getHost().port, client, cCTX)
return clientProto.deferred.addCallback(
self.assertEqual, b"+OK <some crap>\r\n")
class ConnectionLostTests(unittest.TestCase, ContextGeneratingMixin):
"""
SSL connection closing tests.
"""
def testImmediateDisconnect(self):
org = "twisted.test.test_ssl"
self.setupServerAndClient(
(org, org + ", client"), {},
(org, org + ", server"), {})
# Set up a server, connect to it with a client, which should work since our verifiers
# allow anything, then disconnect.
serverProtocolFactory = protocol.ServerFactory()
serverProtocolFactory.protocol = protocol.Protocol
self.serverPort = serverPort = reactor.listenSSL(0,
serverProtocolFactory, self.serverCtxFactory)
clientProtocolFactory = protocol.ClientFactory()
clientProtocolFactory.protocol = ImmediatelyDisconnectingProtocol
clientProtocolFactory.connectionDisconnected = defer.Deferred()
reactor.connectSSL('127.0.0.1',
serverPort.getHost().port, clientProtocolFactory, self.clientCtxFactory)
return clientProtocolFactory.connectionDisconnected.addCallback(
lambda ignoredResult: self.serverPort.stopListening())
def test_bothSidesLoseConnection(self):
"""
Both sides of SSL connection close connection; the connections should
close cleanly, and only after the underlying TCP connection has
disconnected.
"""
class CloseAfterHandshake(protocol.Protocol):
gotData = False
def __init__(self):
self.done = defer.Deferred()
def connectionMade(self):
self.transport.write(b"a")
def dataReceived(self, data):
# If we got data, handshake is over:
self.gotData = True
self.transport.loseConnection()
def connectionLost(self, reason):
if not self.gotData:
reason = RuntimeError("We never received the data!")
self.done.errback(reason)
del self.done
org = "twisted.test.test_ssl"
self.setupServerAndClient(
(org, org + ", client"), {},
(org, org + ", server"), {})
serverProtocol = CloseAfterHandshake()
serverProtocolFactory = protocol.ServerFactory()
serverProtocolFactory.protocol = lambda: serverProtocol
serverPort = reactor.listenSSL(0,
serverProtocolFactory, self.serverCtxFactory)
self.addCleanup(serverPort.stopListening)
clientProtocol = CloseAfterHandshake()
clientProtocolFactory = protocol.ClientFactory()
clientProtocolFactory.protocol = lambda: clientProtocol
reactor.connectSSL('127.0.0.1',
serverPort.getHost().port, clientProtocolFactory, self.clientCtxFactory)
def checkResult(failure):
failure.trap(ConnectionDone)
return defer.gatherResults(
[clientProtocol.done.addErrback(checkResult),
serverProtocol.done.addErrback(checkResult)])
if newTLS is None:
test_bothSidesLoseConnection.skip = "Old SSL code doesn't always close cleanly."
def testFailedVerify(self):
org = "twisted.test.test_ssl"
self.setupServerAndClient(
(org, org + ", client"), {},
(org, org + ", server"), {})
def verify(*a):
return False
self.clientCtxFactory.getContext().set_verify(SSL.VERIFY_PEER, verify)
serverConnLost = defer.Deferred()
serverProtocol = protocol.Protocol()
serverProtocol.connectionLost = serverConnLost.callback
serverProtocolFactory = protocol.ServerFactory()
serverProtocolFactory.protocol = lambda: serverProtocol
self.serverPort = serverPort = reactor.listenSSL(0,
serverProtocolFactory, self.serverCtxFactory)
clientConnLost = defer.Deferred()
clientProtocol = protocol.Protocol()
clientProtocol.connectionLost = clientConnLost.callback
clientProtocolFactory = protocol.ClientFactory()
clientProtocolFactory.protocol = lambda: clientProtocol
reactor.connectSSL('127.0.0.1',
serverPort.getHost().port, clientProtocolFactory, self.clientCtxFactory)
dl = defer.DeferredList([serverConnLost, clientConnLost], consumeErrors=True)
return dl.addCallback(self._cbLostConns)
def _cbLostConns(self, results):
(sSuccess, sResult), (cSuccess, cResult) = results
self.assertFalse(sSuccess)
self.assertFalse(cSuccess)
acceptableErrors = [SSL.Error]
# Rather than getting a verification failure on Windows, we are getting
# a connection failure. Without something like sslverify proxying
# in-between we can't fix up the platform's errors, so let's just
# specifically say it is only OK in this one case to keep the tests
# passing. Normally we'd like to be as strict as possible here, so
# we're not going to allow this to report errors incorrectly on any
# other platforms.
if platform.isWindows():
from twisted.internet.error import ConnectionLost
acceptableErrors.append(ConnectionLost)
sResult.trap(*acceptableErrors)
cResult.trap(*acceptableErrors)
return self.serverPort.stopListening()
class FakeContext:
"""
L{OpenSSL.SSL.Context} double which can more easily be inspected.
"""
def __init__(self, method):
self._method = method
self._options = 0
def set_options(self, options):
self._options |= options
def use_certificate_file(self, fileName):
pass
def use_privatekey_file(self, fileName):
pass
class DefaultOpenSSLContextFactoryTests(unittest.TestCase):
"""
Tests for L{ssl.DefaultOpenSSLContextFactory}.
"""
def setUp(self):
# pyOpenSSL Context objects aren't introspectable enough. Pass in
# an alternate context factory so we can inspect what is done to it.
self.contextFactory = ssl.DefaultOpenSSLContextFactory(
certPath, certPath, _contextFactory=FakeContext)
self.context = self.contextFactory.getContext()
def test_method(self):
"""
L{ssl.DefaultOpenSSLContextFactory.getContext} returns an SSL context
which can use SSLv3 or TLSv1 but not SSLv2.
"""
# SSLv23_METHOD allows SSLv2, SSLv3, or TLSv1
self.assertEqual(self.context._method, SSL.SSLv23_METHOD)
# And OP_NO_SSLv2 disables the SSLv2 support.
self.assertTrue(self.context._options & SSL.OP_NO_SSLv2)
# Make sure SSLv3 and TLSv1 aren't disabled though.
self.assertFalse(self.context._options & SSL.OP_NO_SSLv3)
self.assertFalse(self.context._options & SSL.OP_NO_TLSv1)
def test_missingCertificateFile(self):
"""
Instantiating L{ssl.DefaultOpenSSLContextFactory} with a certificate
filename which does not identify an existing file results in the
initializer raising L{OpenSSL.SSL.Error}.
"""
self.assertRaises(
SSL.Error,
ssl.DefaultOpenSSLContextFactory, certPath, self.mktemp())
def test_missingPrivateKeyFile(self):
"""
Instantiating L{ssl.DefaultOpenSSLContextFactory} with a private key
filename which does not identify an existing file results in the
initializer raising L{OpenSSL.SSL.Error}.
"""
self.assertRaises(
SSL.Error,
ssl.DefaultOpenSSLContextFactory, self.mktemp(), certPath)
class ClientContextFactoryTests(unittest.TestCase):
"""
Tests for L{ssl.ClientContextFactory}.
"""
def setUp(self):
self.contextFactory = ssl.ClientContextFactory()
self.contextFactory._contextFactory = FakeContext
self.context = self.contextFactory.getContext()
def test_method(self):
"""
L{ssl.ClientContextFactory.getContext} returns a context which can use
SSLv3 or TLSv1 but not SSLv2.
"""
self.assertEqual(self.context._method, SSL.SSLv23_METHOD)
self.assertTrue(self.context._options & SSL.OP_NO_SSLv2)
self.assertFalse(self.context._options & SSL.OP_NO_SSLv3)
self.assertFalse(self.context._options & SSL.OP_NO_TLSv1)
if interfaces.IReactorSSL(reactor, None) is None:
for tCase in [StolenTCPTests, TLSTests, SpammyTLSTests,
BufferingTests, ConnectionLostTests,
DefaultOpenSSLContextFactoryTests,
ClientContextFactoryTests]:
tCase.skip = "Reactor does not support SSL, cannot run SSL tests"
| gpl-3.0 |
metaml/nupic | src/nupic/datafiles/extra/firstOrder/raw/makeDataset.py | 34 | 3485 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file import File
def createFirstOrderModel(numCategories=5, alpha=0.5):
categoryList = ['cat%02d' % i for i in range(numCategories)]
initProbability = numpy.ones(numCategories)/numCategories
transitionTable = numpy.random.dirichlet(alpha=[alpha]*numCategories,
size=numCategories)
return categoryList, initProbability, transitionTable
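# Editor's sketch, not part of the original script: createFirstOrderModel()
# returns a uniform initial distribution and a transition table whose rows are
# independent Dirichlet samples, so every row is already a probability
# distribution. The hypothetical helper below just makes that property explicit.
def checkModelIsStochastic(model):
  _, initProbability, transitionTable = model
  assert abs(initProbability.sum() - 1.0) < 1e-9
  assert numpy.allclose(transitionTable.sum(axis=1), 1.0)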
def generateFirstOrderData(model, numIterations=10000, seqLength=5,
resets=True, suffix='train'):
print "Creating %d iteration file with seqLength %d" % (numIterations, seqLength)
print "Filename",
categoryList, initProbability, transitionTable = model
initProbability = initProbability.cumsum()
transitionTable = transitionTable.cumsum(axis=1)
outputFile = 'fo_%d_%d_%s.csv' % (numIterations, seqLength, suffix)
print "Filename", outputFile
fields = [('reset', 'int', 'R'), ('name', 'string', '')]
o = File(outputFile, fields)
seqIdx = 0
rand = numpy.random.rand()
catIdx = numpy.searchsorted(initProbability, rand)
for i in xrange(numIterations):
rand = numpy.random.rand()
if seqIdx == 0 and resets:
catIdx = numpy.searchsorted(initProbability, rand)
reset = 1
else:
catIdx = numpy.searchsorted(transitionTable[catIdx], rand)
reset = 0
o.write([reset,categoryList[catIdx]])
seqIdx = (seqIdx+1)%seqLength
o.close()
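# Editor's sketch, not part of the original script: generateFirstOrderData()
# picks each category by turning a probability row into a cumulative sum and
# bisecting it with a uniform random draw. The hypothetical helper below shows
# that sampling step in isolation.
def sampleCategoryIndex(probabilities):
  """Return an index drawn from the categorical distribution `probabilities`."""
  cumulative = numpy.asarray(probabilities, dtype=float).cumsum()
  return numpy.searchsorted(cumulative, numpy.random.rand())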
if __name__=='__main__':
numpy.random.seed(1956)
model = createFirstOrderModel()
categoryList = model[0]
categoryFile = open("categories.txt", 'w')
for category in categoryList:
categoryFile.write(category+'\n')
categoryFile.close()
#import pylab
#pylab.imshow(model[2], interpolation='nearest')
#pylab.show()
for resets in [True, False]:
for seqLength in [2, 10]:
for numIterations in [1000, 10000, 100000]:
generateFirstOrderData(model,
numIterations=numIterations,
seqLength=seqLength,
resets=resets,
suffix='train_%s' % ('resets' if resets else 'noresets',))
generateFirstOrderData(model, numIterations=10000, seqLength=seqLength,
resets=resets,
suffix='test_%s' % ('resets' if resets else 'noresets',))
| agpl-3.0 |
ActionAdam/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x058.py | 252 | 4678 | data = (
'Ku ', # 0x00
'Ke ', # 0x01
'Tang ', # 0x02
'Kun ', # 0x03
'Ni ', # 0x04
'Jian ', # 0x05
'Dui ', # 0x06
'Jin ', # 0x07
'Gang ', # 0x08
'Yu ', # 0x09
'E ', # 0x0a
'Peng ', # 0x0b
'Gu ', # 0x0c
'Tu ', # 0x0d
'Leng ', # 0x0e
'[?] ', # 0x0f
'Ya ', # 0x10
'Qian ', # 0x11
'[?] ', # 0x12
'An ', # 0x13
'[?] ', # 0x14
'Duo ', # 0x15
'Nao ', # 0x16
'Tu ', # 0x17
'Cheng ', # 0x18
'Yin ', # 0x19
'Hun ', # 0x1a
'Bi ', # 0x1b
'Lian ', # 0x1c
'Guo ', # 0x1d
'Die ', # 0x1e
'Zhuan ', # 0x1f
'Hou ', # 0x20
'Bao ', # 0x21
'Bao ', # 0x22
'Yu ', # 0x23
'Di ', # 0x24
'Mao ', # 0x25
'Jie ', # 0x26
'Ruan ', # 0x27
'E ', # 0x28
'Geng ', # 0x29
'Kan ', # 0x2a
'Zong ', # 0x2b
'Yu ', # 0x2c
'Huang ', # 0x2d
'E ', # 0x2e
'Yao ', # 0x2f
'Yan ', # 0x30
'Bao ', # 0x31
'Ji ', # 0x32
'Mei ', # 0x33
'Chang ', # 0x34
'Du ', # 0x35
'Tuo ', # 0x36
'Yin ', # 0x37
'Feng ', # 0x38
'Zhong ', # 0x39
'Jie ', # 0x3a
'Zhen ', # 0x3b
'Feng ', # 0x3c
'Gang ', # 0x3d
'Chuan ', # 0x3e
'Jian ', # 0x3f
'Pyeng ', # 0x40
'Toride ', # 0x41
'Xiang ', # 0x42
'Huang ', # 0x43
'Leng ', # 0x44
'Duan ', # 0x45
'[?] ', # 0x46
'Xuan ', # 0x47
'Ji ', # 0x48
'Ji ', # 0x49
'Kuai ', # 0x4a
'Ying ', # 0x4b
'Ta ', # 0x4c
'Cheng ', # 0x4d
'Yong ', # 0x4e
'Kai ', # 0x4f
'Su ', # 0x50
'Su ', # 0x51
'Shi ', # 0x52
'Mi ', # 0x53
'Ta ', # 0x54
'Weng ', # 0x55
'Cheng ', # 0x56
'Tu ', # 0x57
'Tang ', # 0x58
'Que ', # 0x59
'Zhong ', # 0x5a
'Li ', # 0x5b
'Peng ', # 0x5c
'Bang ', # 0x5d
'Sai ', # 0x5e
'Zang ', # 0x5f
'Dui ', # 0x60
'Tian ', # 0x61
'Wu ', # 0x62
'Cheng ', # 0x63
'Xun ', # 0x64
'Ge ', # 0x65
'Zhen ', # 0x66
'Ai ', # 0x67
'Gong ', # 0x68
'Yan ', # 0x69
'Kan ', # 0x6a
'Tian ', # 0x6b
'Yuan ', # 0x6c
'Wen ', # 0x6d
'Xie ', # 0x6e
'Liu ', # 0x6f
'Ama ', # 0x70
'Lang ', # 0x71
'Chang ', # 0x72
'Peng ', # 0x73
'Beng ', # 0x74
'Chen ', # 0x75
'Cu ', # 0x76
'Lu ', # 0x77
'Ou ', # 0x78
'Qian ', # 0x79
'Mei ', # 0x7a
'Mo ', # 0x7b
'Zhuan ', # 0x7c
'Shuang ', # 0x7d
'Shu ', # 0x7e
'Lou ', # 0x7f
'Chi ', # 0x80
'Man ', # 0x81
'Biao ', # 0x82
'Jing ', # 0x83
'Qi ', # 0x84
'Shu ', # 0x85
'Di ', # 0x86
'Zhang ', # 0x87
'Kan ', # 0x88
'Yong ', # 0x89
'Dian ', # 0x8a
'Chen ', # 0x8b
'Zhi ', # 0x8c
'Xi ', # 0x8d
'Guo ', # 0x8e
'Qiang ', # 0x8f
'Jin ', # 0x90
'Di ', # 0x91
'Shang ', # 0x92
'Mu ', # 0x93
'Cui ', # 0x94
'Yan ', # 0x95
'Ta ', # 0x96
'Zeng ', # 0x97
'Qi ', # 0x98
'Qiang ', # 0x99
'Liang ', # 0x9a
'[?] ', # 0x9b
'Zhui ', # 0x9c
'Qiao ', # 0x9d
'Zeng ', # 0x9e
'Xu ', # 0x9f
'Shan ', # 0xa0
'Shan ', # 0xa1
'Ba ', # 0xa2
'Pu ', # 0xa3
'Kuai ', # 0xa4
'Dong ', # 0xa5
'Fan ', # 0xa6
'Que ', # 0xa7
'Mo ', # 0xa8
'Dun ', # 0xa9
'Dun ', # 0xaa
'Dun ', # 0xab
'Di ', # 0xac
'Sheng ', # 0xad
'Duo ', # 0xae
'Duo ', # 0xaf
'Tan ', # 0xb0
'Deng ', # 0xb1
'Wu ', # 0xb2
'Fen ', # 0xb3
'Huang ', # 0xb4
'Tan ', # 0xb5
'Da ', # 0xb6
'Ye ', # 0xb7
'Sho ', # 0xb8
'Mama ', # 0xb9
'Yu ', # 0xba
'Qiang ', # 0xbb
'Ji ', # 0xbc
'Qiao ', # 0xbd
'Ken ', # 0xbe
'Yi ', # 0xbf
'Pi ', # 0xc0
'Bi ', # 0xc1
'Dian ', # 0xc2
'Jiang ', # 0xc3
'Ye ', # 0xc4
'Yong ', # 0xc5
'Bo ', # 0xc6
'Tan ', # 0xc7
'Lan ', # 0xc8
'Ju ', # 0xc9
'Huai ', # 0xca
'Dang ', # 0xcb
'Rang ', # 0xcc
'Qian ', # 0xcd
'Xun ', # 0xce
'Lan ', # 0xcf
'Xi ', # 0xd0
'He ', # 0xd1
'Ai ', # 0xd2
'Ya ', # 0xd3
'Dao ', # 0xd4
'Hao ', # 0xd5
'Ruan ', # 0xd6
'Mama ', # 0xd7
'Lei ', # 0xd8
'Kuang ', # 0xd9
'Lu ', # 0xda
'Yan ', # 0xdb
'Tan ', # 0xdc
'Wei ', # 0xdd
'Huai ', # 0xde
'Long ', # 0xdf
'Long ', # 0xe0
'Rui ', # 0xe1
'Li ', # 0xe2
'Lin ', # 0xe3
'Rang ', # 0xe4
'Ten ', # 0xe5
'Xun ', # 0xe6
'Yan ', # 0xe7
'Lei ', # 0xe8
'Ba ', # 0xe9
'[?] ', # 0xea
'Shi ', # 0xeb
'Ren ', # 0xec
'[?] ', # 0xed
'Zhuang ', # 0xee
'Zhuang ', # 0xef
'Sheng ', # 0xf0
'Yi ', # 0xf1
'Mai ', # 0xf2
'Ke ', # 0xf3
'Zhu ', # 0xf4
'Zhuang ', # 0xf5
'Hu ', # 0xf6
'Hu ', # 0xf7
'Kun ', # 0xf8
'Yi ', # 0xf9
'Hu ', # 0xfa
'Xu ', # 0xfb
'Kun ', # 0xfc
'Shou ', # 0xfd
'Mang ', # 0xfe
'Zun ', # 0xff
)
| gpl-2.0 |
Stane1983/u-boot | doc/conf.py | 2 | 18179 | # -*- coding: utf-8 -*-
#
# The U-Boot documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
from load_config import loadConfig
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
latex_engine = 'xelatex'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'kfigure']
#
# cdomain is badly broken in Sphinx 3+. Leaving it out generates *most*
# of the docs correctly, but not all.
#
if major >= 3:
if (major > 3) or (minor > 0 or patch >= 2):
sys.stderr.write('''The build process with Sphinx 3+ is broken.
You will have to remove -W in doc/Makefile.
''')
# Sphinx c function parser is more pedantic with regards to type
# checking. Due to that, having macros at c:function cause problems.
# Those needed to be escaped by using c_id_attributes[] array
c_id_attributes = [
# include/linux/compiler.h
"__maybe_unused",
# include/efi.h
"EFIAPI",
# include/efi_loader.h
"__efi_runtime",
]
else:
extensions.append('cdomain')
# The name of the math extension changed on Sphinx 1.4
if (major == 1 and minor > 3) or (major > 1):
extensions.append("sphinx.ext.imgmath")
else:
extensions.append("sphinx.ext.pngmath")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Das U-Boot'
copyright = 'The U-Boot development community'
author = 'The U-Boot development community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In a normal build, version and release are set to KERNELVERSION and
# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
# arguments.
#
# The following code tries to extract the information by reading the Makefile,
# when Sphinx is run directly (e.g. by Read the Docs).
try:
makefile_version = None
makefile_patchlevel = None
for line in open('../Makefile'):
key, val = [x.strip() for x in line.split('=', 2)]
if key == 'VERSION':
makefile_version = val
elif key == 'PATCHLEVEL':
makefile_patchlevel = val
if makefile_version and makefile_patchlevel:
break
except:
pass
finally:
if makefile_version and makefile_patchlevel:
version = release = makefile_version + '.' + makefile_patchlevel
else:
version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['output']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
primary_domain = 'c'
highlight_language = 'none'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The Read the Docs theme is available from
# - https://github.com/snide/sphinx_rtd_theme
# - https://pypi.python.org/pypi/sphinx_rtd_theme
# - python-sphinx-rtd-theme package (on Debian)
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was not found. Make sure you have the theme installed to produce pretty HTML output. Falling back to the default theme.\n')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../tools/logos/u-boot_logo.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx-static']
html_context = {
'css_files': [
'_static/theme_overrides.css',
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheUBootdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '8pt',
# Latex figure (float) alignment
#'figure_align': 'htbp',
# Don't mangle UTF-8 chars
'inputenc': '',
'utf8extra': '',
# Additional stuff for the LaTeX preamble.
'preamble': '''
% Use some font with UTF-8 support with XeLaTeX
\\usepackage{fontspec}
\\setsansfont{DejaVu Sans}
\\setromanfont{DejaVu Serif}
\\setmonofont{DejaVu Sans Mono}
'''
}
# Fix reference escape troubles with Sphinx 1.4.x
if major == 1 and minor > 3:
latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
if major == 1 and minor <= 4:
latex_elements['preamble'] += '\\usepackage[margin=0.5in, top=1in, bottom=1in]{geometry}'
elif major == 1 and (minor > 5 or (minor == 5 and patch >= 3)):
latex_elements['sphinxsetup'] = 'hmargin=0.5in, vmargin=1in'
latex_elements['preamble'] += '\\fvset{fontsize=auto}\n'
# Customize notice background colors on Sphinx < 1.6:
if major == 1 and minor < 6:
latex_elements['preamble'] += '''
\\usepackage{ifthen}
% Put notes in color and let them be inside a table
\\definecolor{NoteColor}{RGB}{204,255,255}
\\definecolor{WarningColor}{RGB}{255,204,204}
\\definecolor{AttentionColor}{RGB}{255,255,204}
\\definecolor{ImportantColor}{RGB}{192,255,204}
\\definecolor{OtherColor}{RGB}{204,204,204}
\\newlength{\\mynoticelength}
\\makeatletter\\newenvironment{coloredbox}[1]{%
\\setlength{\\fboxrule}{1pt}
\\setlength{\\fboxsep}{7pt}
\\setlength{\\mynoticelength}{\\linewidth}
\\addtolength{\\mynoticelength}{-2\\fboxsep}
\\addtolength{\\mynoticelength}{-2\\fboxrule}
\\begin{lrbox}{\\@tempboxa}\\begin{minipage}{\\mynoticelength}}{\\end{minipage}\\end{lrbox}%
\\ifthenelse%
{\\equal{\\py@noticetype}{note}}%
{\\colorbox{NoteColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{warning}}%
{\\colorbox{WarningColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{attention}}%
{\\colorbox{AttentionColor}{\\usebox{\\@tempboxa}}}%
{%
\\ifthenelse%
{\\equal{\\py@noticetype}{important}}%
{\\colorbox{ImportantColor}{\\usebox{\\@tempboxa}}}%
{\\colorbox{OtherColor}{\\usebox{\\@tempboxa}}}%
}%
}%
}%
}\\makeatother
\\makeatletter
\\renewenvironment{notice}[2]{%
\\def\\py@noticetype{#1}
\\begin{coloredbox}{#1}
\\bf\\it
\\par\\strong{#2}
\\csname py@noticestart@#1\\endcsname
}
{
\\csname py@noticeend@\\py@noticetype\\endcsname
\\end{coloredbox}
}
\\makeatother
'''
# With Sphinx 1.6, it is possible to change the Bg color directly
# by using:
# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255}
# \definecolor{sphinxwarningBgColor}{RGB}{255,204,204}
# \definecolor{sphinxattentionBgColor}{RGB}{255,255,204}
# \definecolor{sphinximportantBgColor}{RGB}{192,255,204}
#
# However, it requires using the sphinx heavybox instead, e.g.:
#
# \renewenvironment{sphinxlightbox}{%
#     \begin{sphinxheavybox}
# }{%
#     \end{sphinxheavybox}
# }
#
# Unfortunately, the implementation is buggy: if a note is inside a
# table, it isn't displayed well. So, for now, let's use boring
# black and white notes.
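#
# For reference, on Sphinx >= 1.6 the equivalent (currently unused) tweak
# could be assembled from the snippets above, roughly as a sketch:
#
#     if major > 1 or (major == 1 and minor >= 6):
#         latex_elements['preamble'] += '''
#             \\definecolor{sphinxnoteBgColor}{RGB}{204,255,255}
#             \\definecolor{sphinxwarningBgColor}{RGB}{255,204,204}
#             \\definecolor{sphinxattentionBgColor}{RGB}{255,255,204}
#             \\definecolor{sphinximportantBgColor}{RGB}{192,255,204}
#             \\renewenvironment{sphinxlightbox}
#                 {\\begin{sphinxheavybox}}{\\end{sphinxheavybox}}
#         '''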
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Sorted in alphabetical order
latex_documents = [
('index', 'u-boot-hacker-manual.tex', 'U-Boot Hacker Manual',
'The U-Boot development community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dasuboot', 'The U-Boot Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DasUBoot', 'The U-Boot Documentation',
author, 'DasUBoot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
#=======
# rst2pdf
#
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
#
# FIXME: Do not add the index file here; the result will be too big. Adding
# multiple PDF files here actually tries to get the cross-referencing right
# *between* PDF files.
pdf_documents = [
('uboot-documentation', u'U-Boot', u'U-Boot', u'J. Random Bozo'),
]
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
kerneldoc_bin = '../scripts/kernel-doc'
kerneldoc_srctree = '..'
# ------------------------------------------------------------------------------
# Since loadConfig overwrites settings from the global namespace, it has to be
# the last statement in the conf.py file
# ------------------------------------------------------------------------------
loadConfig(globals())
| gpl-2.0 |
teochenglim/ansible-modules-extras | cloud/amazon/ec2_remote_facts.py | 15 | 6388 | #!/usr/bin/python
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_remote_facts
short_description: Gather facts about ec2 instances in AWS
description:
- Gather facts about ec2 instances in AWS
version_added: "2.0"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
required: false
default: null
author:
- "Michael Schuett (@michaeljs1990)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all ec2 instances
- ec2_remote_facts:
# Gather facts about all running ec2 instances with a tag of Name:Example
- ec2_remote_facts:
filters:
instance-state-name: running
"tag:Name": Example
# Gather facts about instance i-123456
- ec2_remote_facts:
filters:
instance-id: i-123456
# Gather facts about all instances in vpc-123456 that are t2.small type
- ec2_remote_facts:
filters:
vpc-id: vpc-123456
instance-type: t2.small
'''
try:
import boto.ec2
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
def get_instance_info(instance):
# Get groups
groups = []
for group in instance.groups:
groups.append({ 'id': group.id, 'name': group.name }.copy())
# Get interfaces
interfaces = []
for interface in instance.interfaces:
interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy())
# If an instance is terminated, sourceDestCheck is no longer returned
try:
source_dest_check = instance.sourceDestCheck
except AttributeError:
source_dest_check = None
# Get block device mapping
try:
bdm_dict = []
bdm = getattr(instance, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict.append({
'device_name': device_name,
'status': bdm[device_name].status,
'volume_id': bdm[device_name].volume_id,
'delete_on_termination': bdm[device_name].delete_on_termination,
'attach_time': bdm[device_name].attach_time
})
except AttributeError:
pass
instance_info = { 'id': instance.id,
'kernel': instance.kernel,
'instance_profile': instance.instance_profile,
'root_device_type': instance.root_device_type,
'private_dns_name': instance.private_dns_name,
'public_dns_name': instance.public_dns_name,
'ebs_optimized': instance.ebs_optimized,
'client_token': instance.client_token,
'virtualization_type': instance.virtualization_type,
'architecture': instance.architecture,
'ramdisk': instance.ramdisk,
'tags': instance.tags,
'key_name': instance.key_name,
'source_destination_check': source_dest_check,
'image_id': instance.image_id,
'groups': groups,
'interfaces': interfaces,
'spot_instance_request_id': instance.spot_instance_request_id,
'requester_id': instance.requester_id,
'monitoring_state': instance.monitoring_state,
'placement': {
'tenancy': instance._placement.tenancy,
'zone': instance._placement.zone
},
'ami_launch_index': instance.ami_launch_index,
'launch_time': instance.launch_time,
'hypervisor': instance.hypervisor,
'region': instance.region.name,
'persistent': instance.persistent,
'private_ip_address': instance.private_ip_address,
'public_ip_address': instance.ip_address,
'state': instance._state.name,
'vpc_id': instance.vpc_id,
'block_device_mapping': bdm_dict,
}
return instance_info
def list_ec2_instances(connection, module):
filters = module.params.get("filters")
instance_dict_array = []
try:
all_instances = connection.get_only_instances(filters=filters)
except BotoServerError as e:
module.fail_json(msg=e.message)
for instance in all_instances:
instance_dict_array.append(get_instance_info(instance))
module.exit_json(instances=instance_dict_array)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters = dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
list_ec2_instances(connection, module)
if __name__ == '__main__':
main()
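# Example playbook usage (illustrative; values below are placeholders). The
# module returns its results under the registered variable's "instances" key,
# one dict per instance with the fields assembled in get_instance_info():
#
#   - ec2_remote_facts:
#       filters:
#         instance-state-name: running
#     register: ec2_facts
#
#   - debug:
#       msg: "{{ item.id }} -> {{ item.private_ip_address }}"
#     with_items: "{{ ec2_facts.instances }}"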
| gpl-3.0 |
yanchen036/tensorflow | tensorflow/python/ops/losses/util_test.py | 113 | 1898 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for losses util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops.losses import util
from tensorflow.python.platform import test
class LossesUtilTest(test.TestCase):
def testGetRegularizationLoss(self):
# Empty regularization collection should evaluate to 0.0.
with self.test_session():
self.assertEqual(0.0, util.get_regularization_loss().eval())
# Loss should sum.
ops.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(2.0))
ops.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(3.0))
with self.test_session():
self.assertEqual(5.0, util.get_regularization_loss().eval())
# Check scope capture mechanism.
with ops.name_scope('scope1'):
ops.add_to_collection(
ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(-1.0))
with self.test_session():
self.assertEqual(-1.0, util.get_regularization_loss('scope1').eval())
if __name__ == '__main__':
test.main()
| apache-2.0 |
JshWright/home-assistant | homeassistant/components/lock/__init__.py | 2 | 5325 | """
Component to interface with various locks that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/lock/
"""
import asyncio
from datetime import timedelta
import functools as ft
import logging
import os
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, STATE_LOCKED, STATE_UNLOCKED,
STATE_UNKNOWN, SERVICE_LOCK, SERVICE_UNLOCK)
from homeassistant.components import group
ATTR_CHANGED_BY = 'changed_by'
DOMAIN = 'lock'
ENTITY_ID_ALL_LOCKS = group.ENTITY_ID_FORMAT.format('all_locks')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_LOCKS = 'all locks'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
SCAN_INTERVAL = timedelta(seconds=30)
LOCK_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_CODE): cv.string,
})
_LOGGER = logging.getLogger(__name__)
def is_locked(hass, entity_id=None):
"""Return if the lock is locked based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_LOCKS
return hass.states.is_state(entity_id, STATE_LOCKED)
def lock(hass, entity_id=None, code=None):
"""Lock all or specified locks."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_LOCK, data)
def unlock(hass, entity_id=None, code=None):
"""Unlock all or specified locks."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_UNLOCK, data)
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for locks."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LOCKS)
yield from component.async_setup(config)
@asyncio.coroutine
def async_handle_lock_service(service):
"""Handle calls to the lock services."""
target_locks = component.async_extract_from_service(service)
code = service.data.get(ATTR_CODE)
for entity in target_locks:
if service.service == SERVICE_LOCK:
yield from entity.async_lock(code=code)
else:
yield from entity.async_unlock(code=code)
update_tasks = []
for entity in target_locks:
if not entity.should_poll:
continue
update_coro = hass.async_add_job(
entity.async_update_ha_state(True))
if hasattr(entity, 'async_update'):
update_tasks.append(update_coro)
else:
yield from update_coro
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
hass.services.async_register(
DOMAIN, SERVICE_UNLOCK, async_handle_lock_service,
descriptions.get(SERVICE_UNLOCK), schema=LOCK_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, SERVICE_LOCK, async_handle_lock_service,
descriptions.get(SERVICE_LOCK), schema=LOCK_SERVICE_SCHEMA)
return True
class LockDevice(Entity):
"""Representation of a lock."""
@property
def changed_by(self):
"""Last change triggered by."""
return None
# pylint: disable=no-self-use
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
@property
def is_locked(self):
"""Return true if the lock is locked."""
return None
def lock(self, **kwargs):
"""Lock the lock."""
raise NotImplementedError()
def async_lock(self, **kwargs):
"""Lock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, ft.partial(self.lock, **kwargs))
def unlock(self, **kwargs):
"""Unlock the lock."""
raise NotImplementedError()
def async_unlock(self, **kwargs):
"""Unlock the lock.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, ft.partial(self.unlock, **kwargs))
@property
def state_attributes(self):
"""Return the state attributes."""
if self.code_format is None:
return None
state_attr = {
ATTR_CODE_FORMAT: self.code_format,
ATTR_CHANGED_BY: self.changed_by
}
return state_attr
@property
def state(self):
"""Return the state."""
locked = self.is_locked
if locked is None:
return STATE_UNKNOWN
return STATE_LOCKED if locked else STATE_UNLOCKED
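# The class below is an illustrative sketch only and not part of this
# component: a minimal LockDevice subclass showing the contract defined above.
# A real platform would live in its own module and be registered through the
# usual platform setup; the class and attribute names here are hypothetical.
class DemoLock(LockDevice):
    """In-memory lock used only to illustrate the LockDevice interface."""
    def __init__(self, name, code=None):
        """Initialize the demo lock, optionally protected by a 4-digit code."""
        self._name = name
        self._code = code
        self._locked = True
    @property
    def name(self):
        """Return the display name of this demo lock."""
        return self._name
    @property
    def code_format(self):
        """Return a regex for the code, or None if no code is required."""
        return r'^\d{4}$' if self._code else None
    @property
    def is_locked(self):
        """Return true if the lock is locked."""
        return self._locked
    def lock(self, **kwargs):
        """Lock, if the supplied code matches the configured code."""
        if self._code is None or kwargs.get(ATTR_CODE) == self._code:
            self._locked = True
    def unlock(self, **kwargs):
        """Unlock, if the supplied code matches the configured code."""
        if self._code is None or kwargs.get(ATTR_CODE) == self._code:
            self._locked = False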
| apache-2.0 |
wickman/aurproxy | tellapart/aurproxy/share/calculator.py | 5 | 2394 | # Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from tellapart.aurproxy.audit import AuditItem
class ShareCalculator(object):
"""Calculates a share for an endpoint, applying configured adjustments..
A share is calculated from the perspective of a single endpoint vs a weight,
which is calculated given knowledge of the shares of other endpoints.
"""
def __init__(self, source, endpoint, signal_update_fn):
"""
Args:
source - Required aurproxy.source.Source.
endpoint - Required aurproxy.config.SourceEndpoint.
signal_update_fn - Required callback fn - used to signal need to update.
"""
self._source = source
self._endpoint = endpoint
self._signal_update_fn = signal_update_fn
share_adjusters = []
for sa_factory in source.share_adjuster_factories:
share_adjuster = sa_factory.build(endpoint=endpoint,
signal_update_fn=signal_update_fn)
share_adjusters.append(share_adjuster)
self._share_adjusters = share_adjusters
def start(self):
"""
Start configured share adjusters.
"""
for share_adjuster in self._share_adjusters:
share_adjuster.start()
def stop(self):
"""
Stop configured share adjusters.
"""
for share_adjuster in self._share_adjusters:
share_adjuster.stop()
@property
def auditable_share(self):
"""
Get a (float(share), AuditItem)).
"""
share_comps = [1.0]
share_comp_audits = [AuditItem('base', '1.0')]
for share_adjuster in self._share_adjusters:
share_comp, share_comp_audit = share_adjuster.auditable_share
share_comps.append(float(share_comp))
share_comp_audits.append(share_comp_audit)
share = reduce(operator.mul, share_comps)
audit = AuditItem('share', [share, share_comp_audits])
return share, audit
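# The classes below are an illustrative sketch, not part of aurproxy: a
# minimal share adjuster matching the interface ShareCalculator relies on
# above (built by a factory with endpoint/signal_update_fn, started/stopped,
# and exposing auditable_share as a (float, AuditItem) pair). aurproxy ships
# its own adjuster base classes; the names here are hypothetical and only
# show how each component multiplies into the final share.
class StaticShareAdjuster(object):
  """Share adjuster that always contributes a fixed share component."""
  def __init__(self, endpoint, signal_update_fn, share=0.5):
    self._endpoint = endpoint
    self._signal_update_fn = signal_update_fn
    self._share = share
  def start(self):
    """Nothing to watch for a static component."""
    pass
  def stop(self):
    pass
  @property
  def auditable_share(self):
    return self._share, AuditItem('static', str(self._share))
class StaticShareAdjusterFactory(object):
  """Factory matching the share_adjuster_factories usage above."""
  def __init__(self, share=0.5):
    self._share = share
  def build(self, endpoint, signal_update_fn):
    return StaticShareAdjuster(endpoint, signal_update_fn, self._share)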
| apache-2.0 |
stargaser/herschel-archive | hsadownload/boundingbox.py | 1 | 3816 |
# coding: utf-8
# ## Four corners of Herschel images
# This function is from http://stackoverflow.com/questions/13542855/python-help-to-implement-an-algorithm-to-find-the-minimum-area-rectangle-for-gi
#
# with a corrected, working version (the original had an error) at http://gis.stackexchange.com/questions/22895/how-to-find-the-minimum-area-rectangle-for-given-points/169633#169633
#
import numpy as np
from scipy.spatial import ConvexHull
def minimum_bounding_rectangle(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
    :param points: an nx2 matrix of coordinates
    :returns: a 4x2 matrix of corner coordinates
"""
from scipy.ndimage.interpolation import rotate
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
edges = np.zeros((len(hull_points)-1, 2))
edges = hull_points[1:] - hull_points[:-1]
angles = np.zeros((len(edges)))
angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
#angles = np.unique(angles)
# find rotation matrices
    # (each reshaped 2x2 block is a rotation matrix: cos(a - pi/2) = sin(a)
    # and cos(a + pi/2) = -sin(a), giving [[cos, sin], [-sin, cos]])
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
return rval
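# A quick, self-contained check of minimum_bounding_rectangle (illustrative
# only; not used by the pipeline below): scatter points from a rotated,
# anisotropic Gaussian and return them with the fitted 4x2 corner array.
def _demo_minimum_bounding_rectangle(n_points=200, seed=0):
    """Return (points, corners) for a random cloud, e.g. for plotting."""
    rng = np.random.RandomState(seed)
    theta = np.deg2rad(30.0)
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    # stretch along x, then rotate by 30 degrees
    points = rng.normal(size=(n_points, 2)) * [3.0, 1.0]
    points = points.dot(rot.T)
    corners = minimum_bounding_rectangle(points)
    return points, corners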
from scipy.optimize import minimize
from shapely.geometry import Polygon, MultiPoint
def fun(x, hull):
coords = x.reshape(4,2)
p = Polygon(coords)
area = p.difference(hull).area + hull.difference(p).area
return(area)
# ## How to find the minimum quadrilateral?
# Online research suggests there is no comparably convenient formula for the minimum-area quadrilateral; so far we only have the minimum bounding rectangle.
#
# We can try an iterative approach.
def fourCorners(fname, ext=1):
"""
Compute the four corners in celestial coordinates
Parameters:
-----------
fname (string): path to FITS image
ext (int): extension with WCS keywords and image (default 1)
Returns:
--------
corners: 4x2 numpy array of the four corners in (ra, dec) pairs
"""
import astropy.io.fits as fits
from astropy.wcs import WCS
import numpy as np
hdu = fits.open(fname)
w = WCS(hdu[ext].header)
    imdata = hdu[ext].data
grid = np.indices(imdata.shape)
coords = np.vstack([grid[1][np.isfinite(imdata)],grid[0][np.isfinite(imdata)]]).T
rect = minimum_bounding_rectangle(coords)
#
# Use the Scipy hull vertices because shapely's MultiPoint and convex_hull are so slow
hull_points = coords[ConvexHull(coords).vertices]
points = MultiPoint(hull_points)
hull = points.convex_hull
# From trial-and-error, the L-BFGS-B method works the best by far
res = minimize(lambda x: fun(x, hull), rect.flat, method='L-BFGS-B',
options={'ftol': 1e-4, 'disp': False, 'eps': 0.1})
corners = w.all_pix2world(res.x.reshape(4,2),0)
return(corners)
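# Convenience wrapper (illustrative): the FITS file name in the docstring
# example is hypothetical; any image with a valid WCS in the chosen extension
# will do.
def print_corners(fname, ext=1):
    """Print and return the sky corners of a FITS image.
    Example::
        corners = print_corners('hspirepsw_map.fits', ext=1)
    """
    corners = fourCorners(fname, ext=ext)
    for ra, dec in corners:
        print('corner at RA=%.5f deg, Dec=%.5f deg' % (ra, dec))
    return corners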
| bsd-3-clause |
elit3ge/SickRage | lib/github/StatsParticipation.py | 74 | 2654 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class StatsParticipation(github.GithubObject.NonCompletableGithubObject):
"""
This class represents statistics of participation. The reference can be found here http://developer.github.com/v3/repos/statistics/#get-the-weekly-commit-count-for-the-repo-owner-and-everyone-else
"""
@property
def all(self):
"""
:type: list of int
"""
return self._all.value
@property
def owner(self):
"""
:type: list of int
"""
return self._owner.value
def _initAttributes(self):
self._all = github.GithubObject.NotSet
self._owner = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "all" in attributes: # pragma no branch
self._all = self._makeListOfIntsAttribute(attributes["all"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeListOfIntsAttribute(attributes["owner"])
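# Typical usage (an illustrative sketch; the token and repository name are
# placeholders, assuming the usual Repository.get_stats_participation()
# entry point):
#
#   from github import Github
#
#   gh = Github("<access token>")
#   stats = gh.get_repo("jacquev6/PyGithub").get_stats_participation()
#   owner_commits = sum(stats.owner)  # weekly commit counts by the owner
#   total_commits = sum(stats.all)    # weekly commit counts by everyone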
| gpl-3.0 |
alvarolopez/nova | nova/tests/unit/api/ec2/test_cloud.py | 8 | 147928 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import copy
import datetime
import functools
import os
import string
import tempfile
import fixtures
import iso8601
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
from nova import availability_zones
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import manager as compute_manager
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.image import s3
from nova.network import api as network_api
from nova.network import base_api as base_network_api
from nova.network import model
from nova.network.neutronv2 import api as neutronapi
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.unit.api.openstack.compute.contrib import (
test_neutron_security_groups as test_neutron)
from nova.tests.unit import cast_as_call
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_utils
from nova.tests.unit.image import fake
from nova.tests.unit import matchers
from nova import utils
from nova.virt import fake as fake_virt
from nova import volume
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('default_flavor', 'nova.compute.flavors')
CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
HOST = "testhost"
def get_fake_cache(get_floating):
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
if get_floating:
ip_info = [_ip('192.168.0.3',
floats=['1.2.3.4', '5.6.7.8']),
_ip('192.168.0.4')]
else:
ip_info = [_ip('192.168.0.3'),
_ip('192.168.0.4')]
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': ip_info}]}}]
if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return model.NetworkInfo.hydrate(info)
def get_instances_with_cached_ips(orig_func, get_floating,
*args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
if kwargs.get('want_objects', False):
info_cache = objects.InstanceInfoCache()
info_cache.network_info = get_fake_cache(get_floating)
info_cache.obj_reset_changes()
else:
info_cache = {'network_info': get_fake_cache(get_floating)}
if isinstance(instances, (list, obj_base.ObjectListBase)):
for instance in instances:
instance['info_cache'] = info_cache
else:
instances['info_cache'] = info_cache
return instances
class CloudTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(CloudTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
ec2utils.reset_cache()
availability_zones.reset_cache()
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.unit.fake_volume.API')
self.useFixture(fixtures.FakeLogger('boto'))
fake_utils.stub_out_utils_spawn_n(self.stubs)
def fake_show(meh, context, id, **kwargs):
return {'id': id,
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available'}}
def fake_detail(_self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
fake.stub_out_image_service(self.stubs)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
fake_network.set_stub_network_methods(self.stubs)
# set up our cloud
self.cloud = cloud.CloudController()
self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
# Short-circuit the conductor service
self.flags(use_local=True, group='conductor')
# Stub out the notification service so we use the no-op serializer
# and avoid lazy-load traces with the wrap_exception decorator in
# the compute service.
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.scheduler = self.start_service('scheduler')
self.network = self.start_service('network')
self.consoleauth = self.start_service('consoleauth')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.volume_api = volume.API()
self.stubs.Set(compute_manager.ComputeManager,
'_update_scheduler_instance_info', dumb)
self.stubs.Set(compute_manager.ComputeManager,
'_delete_scheduler_instance_info', dumb)
self.stubs.Set(compute_manager.ComputeManager,
'_sync_scheduler_instance_info', dumb)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
# make sure we can map ami-00000001/2 to a uuid in FakeImageService
db.s3_image_create(self.context,
'cedef40a-ed67-4d10-800e-17455edce175')
db.s3_image_create(self.context,
'76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def tearDown(self):
self.volume_api.reset_fake_api(self.context)
super(CloudTestCase, self).tearDown()
fake.FakeImageService_reset()
def fake_get_target(obj, iqn):
return 1
def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
pass
def _stub_instance_get_with_fixed_ips(self,
func_name, get_floating=True):
orig_func = getattr(self.cloud.compute_api, func_name)
def fake_get(*args, **kwargs):
return get_instances_with_cached_ips(orig_func, get_floating,
*args, **kwargs)
self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
keypair_api = compute_api.KeypairAPI()
return keypair_api.create_key_pair(self.context, self.context.user_id,
name)
def test_describe_regions(self):
# Makes sure describe regions runs without raising an exception.
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 1)
self.flags(region_list=["one=test_host1", "two=test_host2"])
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 2)
def test_describe_addresses(self):
# Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.flags(network_api_class='nova.network.api.API')
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_describe_addresses_in_neutron(self):
# Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
self.flags(network_api_class='nova.network.neutronv2.api.API')
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_describe_specific_address(self):
# Makes sure describe specific address works.
addresses = ["10.10.10.10", "10.10.10.11"]
for address in addresses:
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
result = self.cloud.describe_addresses(self.context)
self.assertEqual(len(result['addressesSet']), 2)
result = self.cloud.describe_addresses(self.context,
public_ip=['10.10.10.10'])
self.assertEqual(len(result['addressesSet']), 1)
for address in addresses:
self.cloud.release_address(self.context,
public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_allocate_address(self):
address = "10.10.10.10"
allocate = self.cloud.allocate_address
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.assertEqual(allocate(self.context)['publicIp'], address)
db.floating_ip_destroy(self.context, address)
# There are no longer any pools since the last one was destroyed above
pools = db.floating_ip_get_pools(self.context)
self.assertEqual(0, len(pools))
self.assertRaises(exception.FloatingIpPoolNotFound,
allocate,
self.context)
def test_release_address(self):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova',
'project_id': self.project_id})
result = self.cloud.release_address(self.context, address)
self.assertEqual(result.get('return', None), 'true')
def test_associate_disassociate_address(self):
# Verifies associate runs cleanly without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
# TODO(jkoelker) Probably need to query for instance_type_id and
# make sure we get a valid one
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'],
instance_uuid=inst['uuid'],
host=inst['host'],
vpn=None,
rxtx_factor=3,
project_id=project_id,
macs=None)
fixed_ips = nw_info.fixed_ips()
ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
lambda *args: {'fixed_ips': ['10.0.0.1'],
'fixed_ip6s': [],
'floating_ips': []})
self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
lambda *args: 1)
def fake_update_instance_cache_with_nw_info(api, context, instance,
nw_info=None,
update_cells=True):
return
self.stubs.Set(base_network_api, "update_instance_cache_with_nw_info",
fake_update_instance_cache_with_nw_info)
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
self.cloud.disassociate_address(self.context,
public_ip=address)
self.cloud.release_address(self.context,
public_ip=address)
self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
inst['host'])
db.instance_destroy(self.context, inst['uuid'])
db.floating_ip_destroy(self.context, address)
def test_disassociate_auto_assigned_address(self):
"""Verifies disassociating auto assigned floating IP
raises an exception
"""
address = "10.10.10.10"
def fake_get(*args, **kwargs):
pass
def fake_disassociate_floating_ip(*args, **kwargs):
raise exception.CannotDisassociateAutoAssignedFloatingIP()
self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
lambda *args: 1)
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
self.stubs.Set(network_api.API, 'disassociate_floating_ip',
fake_disassociate_floating_ip)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.cloud.disassociate_address,
self.context, public_ip=address)
def test_disassociate_unassociated_address(self):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.assertRaises(exception.InvalidAssociation,
self.cloud.disassociate_address,
self.context, public_ip=address)
db.floating_ip_destroy(self.context, address)
def test_describe_security_groups(self):
# Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
result = self.cloud.describe_security_groups(self.context)
# NOTE(vish): should have the default group as well
self.assertEqual(len(result['securityGroupInfo']), 2)
result = self.cloud.describe_security_groups(self.context,
group_name=[sec['name']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
sec['name'])
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_all_tenants(self):
# Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': 'foobar',
'name': 'test'})
def _check_name(result, i, expected):
self.assertEqual(result['securityGroupInfo'][i]['groupName'],
expected)
# include all tenants
filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
result = self.cloud.describe_security_groups(self.context,
filter=filter)
self.assertEqual(len(result['securityGroupInfo']), 2)
_check_name(result, 0, 'default')
_check_name(result, 1, sec['name'])
# exclude all tenants
filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
result = self.cloud.describe_security_groups(self.context,
filter=filter)
self.assertEqual(len(result['securityGroupInfo']), 1)
_check_name(result, 0, 'default')
# default all tenants
result = self.cloud.describe_security_groups(self.context)
self.assertEqual(len(result['securityGroupInfo']), 1)
_check_name(result, 0, 'default')
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
result = self.cloud.describe_security_groups(self.context,
group_id=[sec['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
sec['name'])
default = db.security_group_get_by_name(self.context,
self.context.project_id,
'default')
result = self.cloud.describe_security_groups(self.context,
group_id=[default['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
'default')
db.security_group_destroy(self.context, sec['id'])
def test_create_delete_security_group(self):
descript = 'test description'
create = self.cloud.create_security_group
result = create(self.context, 'testgrp', descript)
group_descript = result['securityGroupSet'][0]['groupDescription']
self.assertEqual(descript, group_descript)
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, 'testgrp'))
def test_security_group_quota_limit(self):
self.flags(quota_security_groups=10)
for i in range(1, CONF.quota_security_groups):
name = 'test name %i' % i
descript = 'test description %i' % i
create = self.cloud.create_security_group
create(self.context, name, descript)
# 11'th group should fail
self.assertRaises(exception.SecurityGroupLimitExceeded,
create, self.context, 'foo', 'bar')
def test_delete_security_group_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, group_id=sec['id']))
def test_delete_security_group_with_bad_name(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, 'badname')
def test_delete_security_group_with_bad_group_id(self):
delete = self.cloud.delete_security_group
notfound = exception.SecurityGroupNotFound
self.assertRaises(notfound, delete, self.context, group_id=999)
def test_delete_security_group_no_params(self):
delete = self.cloud.delete_security_group
self.assertRaises(exception.MissingParameter, delete, self.context)
def test_delete_security_group_policy_not_allowed(self):
rules = {'compute_extension:security_groups':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
with mock.patch.object(self.cloud.security_group_api,
'get') as get:
get.return_value = {'project_id': 'invalid'}
self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.delete_security_group, self.context,
'fake-name', 'fake-id')
def test_authorize_security_group_ingress_policy_not_allowed(self):
rules = {'compute_extension:security_groups':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
with mock.patch.object(self.cloud.security_group_api,
'get') as get:
get.return_value = {'project_id': 'invalid'}
self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.authorize_security_group_ingress, self.context,
'fake-name', 'fake-id')
def test_authorize_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges':
{'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_authorize_security_group_fail_missing_source_group(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
'2': {'cidr_ip': u'10.10.10.10/32'}},
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}},
'ip_protocol': u'tcp'}]}
self.assertRaises(exception.SecurityGroupNotFound, authz,
self.context, group_name=sec['name'], **kwargs)
def test_authorize_security_group_ingress_ip_permissions_groups(self):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'name': 'test'
}
db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'somegroup1'})
db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'othergroup2'})
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'},
'2': {'user_id': u'someuser',
'group_name': u'othergroup2'}},
'ip_protocol': u'tcp'}]}
self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
def test_describe_security_group_ingress_groups(self):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'name': 'test'
}
sec1 = db.security_group_create(self.context, kwargs)
sec2 = db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'somegroup1'})
sec3 = db.security_group_create(self.context,
{'project_id': 'someuser',
'user_id': 'someuser',
'description': '',
'name': 'othergroup2'})
authz = self.cloud.authorize_security_group_ingress
kwargs = {'ip_permissions': [
{'groups': {'1': {'user_id': u'someuser',
'group_name': u'somegroup1'}}},
{'ip_protocol': 'tcp',
'from_port': 80,
'to_port': 80,
'groups': {'1': {'user_id': u'someuser',
'group_name': u'othergroup2'}}}]}
self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
self.assertEqual(len(groups['securityGroupInfo']), 1)
actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
self.assertEqual(len(actual_rules), 4)
expected_rules = [{'fromPort': -1,
'groups': [{'groupName': 'somegroup1',
'userId': 'someuser'}],
'ipProtocol': 'icmp',
'ipRanges': [],
'toPort': -1},
{'fromPort': 1,
'groups': [{'groupName': u'somegroup1',
'userId': u'someuser'}],
'ipProtocol': 'tcp',
'ipRanges': [],
'toPort': 65535},
{'fromPort': 1,
'groups': [{'groupName': u'somegroup1',
'userId': u'someuser'}],
'ipProtocol': 'udp',
'ipRanges': [],
'toPort': 65535},
{'fromPort': 80,
'groups': [{'groupName': u'othergroup2',
'userId': u'someuser'}],
'ipProtocol': u'tcp',
'ipRanges': [],
'toPort': 80}]
for rule in expected_rules:
self.assertIn(rule, actual_rules)
db.security_group_destroy(self.context, sec3['id'])
db.security_group_destroy(self.context, sec2['id'])
db.security_group_destroy(self.context, sec1['id'])
def test_revoke_security_group_ingress_policy_not_allowed(self):
rules = {'compute_extension:security_groups':
common_policy.parse_rule('project_id:%(project_id)s')}
policy.set_rules(rules)
with mock.patch.object(self.cloud.security_group_api,
'get') as get:
get.return_value = {'project_id': 'invalid'}
self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.revoke_security_group_ingress, self.context,
'fake-name', 'fake-id')
def test_revoke_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec['id'], **kwargs)
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
def test_authorize_revoke_security_group_ingress_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec['id'], **kwargs)
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
def test_authorize_security_group_ingress_missing_protocol_params(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
self.assertRaises(exception.MissingParameter, authz, self.context,
'test')
def test_authorize_security_group_ingress_missing_group_name_or_id(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
authz = self.cloud.authorize_security_group_ingress
self.assertRaises(exception.MissingParameter, authz, self.context,
**kwargs)
def test_authorize_security_group_ingress_already_exists(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
authz(self.context, group_name=sec['name'], **kwargs)
self.assertRaises(exception.SecurityGroupRuleExists, authz,
self.context, group_name=sec['name'], **kwargs)
def test_security_group_ingress_quota_limit(self):
self.flags(quota_security_group_rules=20)
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec_group = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
for i in range(100, 120):
kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
authz(self.context, group_id=sec_group['id'], **kwargs)
kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
self.assertRaises(exception.SecurityGroupLimitExceeded, authz,
self.context, group_id=sec_group['id'], **kwargs)
def _test_authorize_security_group_no_ports_with_source_group(self, proto):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'description': '',
'name': 'test'
}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto,
'groups': {'1': {'user_id': self.context.user_id,
'group_name': u'test'}}}
self.assertTrue(authz(self.context, group_name=sec['name'],
**auth_kwargs))
describe = self.cloud.describe_security_groups
groups = describe(self.context, group_name=['test'])
self.assertEqual(len(groups['securityGroupInfo']), 1)
actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
expected_rules = [{'groups': [{'groupName': 'test',
'userId': self.context.user_id}],
'ipProtocol': proto,
'ipRanges': []}]
if proto == 'icmp':
expected_rules[0]['fromPort'] = -1
expected_rules[0]['toPort'] = -1
else:
expected_rules[0]['fromPort'] = 1
expected_rules[0]['toPort'] = 65535
self.assertTrue(expected_rules == actual_rules)
describe = self.cloud.describe_security_groups
describe(self.context, group_name=['test'])
db.security_group_destroy(self.context, sec['id'])
def _test_authorize_security_group_no_ports_no_source_group(self, proto):
kwargs = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'description': '',
'name': 'test'
}
sec = db.security_group_create(self.context, kwargs)
authz = self.cloud.authorize_security_group_ingress
auth_kwargs = {'ip_protocol': proto}
self.assertRaises(exception.MissingParameter, authz, self.context,
group_name=sec['name'], **auth_kwargs)
db.security_group_destroy(self.context, sec['id'])
def test_authorize_security_group_no_ports_icmp(self):
self._test_authorize_security_group_no_ports_with_source_group('icmp')
self._test_authorize_security_group_no_ports_no_source_group('icmp')
def test_authorize_security_group_no_ports_tcp(self):
self._test_authorize_security_group_no_ports_with_source_group('tcp')
self._test_authorize_security_group_no_ports_no_source_group('tcp')
def test_authorize_security_group_no_ports_udp(self):
self._test_authorize_security_group_no_ports_with_source_group('udp')
self._test_authorize_security_group_no_ports_no_source_group('udp')
def test_revoke_security_group_ingress_missing_group_name_or_id(self):
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
revoke = self.cloud.revoke_security_group_ingress
self.assertRaises(exception.MissingParameter, revoke,
self.context, **kwargs)
def test_delete_security_group_in_use_by_group(self):
self.cloud.create_security_group(self.context, 'testgrp1',
"test group 1")
self.cloud.create_security_group(self.context, 'testgrp2',
"test group 2")
kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
'group_name': u'testgrp2'}},
}
self.cloud.authorize_security_group_ingress(self.context,
group_name='testgrp1', **kwargs)
group1 = db.security_group_get_by_name(self.context,
self.project_id, 'testgrp1')
get_rules = db.security_group_rule_get_by_security_group
self.assertTrue(get_rules(self.context, group1['id']))
self.cloud.delete_security_group(self.context, 'testgrp2')
self.assertFalse(get_rules(self.context, group1['id']))
def test_delete_security_group_in_use_by_instance(self):
        # Ensure that a group cannot be deleted while in use by an instance.
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
args = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active'}
inst = db.instance_create(self.context, args)
args = {'user_id': self.context.user_id,
'project_id': self.context.project_id,
'name': 'testgrp',
'description': 'Test group'}
group = db.security_group_create(self.context, args)
db.instance_add_security_group(self.context, inst['uuid'], group['id'])
self.assertRaises(exception.InvalidGroup,
self.cloud.delete_security_group,
self.context, 'testgrp')
db.instance_destroy(self.context, inst['uuid'])
self.cloud.delete_security_group(self.context, 'testgrp')
def test_describe_availability_zones(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
# Aggregate based zones
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
agg = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
result = self.cloud.describe_availability_zones(self.context)
self.assertEqual(len(result['availabilityZoneInfo']), 3)
admin_ctxt = context.get_admin_context(read_deleted="no")
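        # The 'verbose' zone name adds per-host service detail, hence the
        # larger result count below.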
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 18)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def test_describe_availability_zones_verbose(self):
# Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
service2 = db.service_create(self.context, {'host': 'host2_zones',
'binary': "nova-compute",
'topic': 'compute',
'report_count': 0})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'second_zone'})
db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
admin_ctxt = context.get_admin_context(read_deleted="no")
result = self.cloud.describe_availability_zones(admin_ctxt,
zone_name='verbose')
self.assertEqual(len(result['availabilityZoneInfo']), 17)
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
def assertEqualSorted(self, x, y):
self.assertEqual(sorted(x), sorted(y))
def test_describe_instances(self):
# Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
sys_meta['EC2_client_token'] = "client-token-1"
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
sys_meta['EC2_client_token'] = "client-token-2"
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host2',
'hostname': 'server-4321',
'vm_state': 'active',
'system_metadata': sys_meta})
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
agg = db.aggregate_create(self.context,
{'name': 'agg1'}, {'availability_zone': 'zone1'})
db.aggregate_host_add(self.context, agg['id'], 'host1')
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
agg2 = db.aggregate_create(self.context,
{'name': 'agg2'}, {'availability_zone': 'zone2'})
db.aggregate_host_add(self.context, agg2['id'], 'host2')
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
# Now try filtering.
instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], instance_id)
self.assertEqual(instance['placement']['availabilityZone'], 'zone2')
self.assertEqual(instance['ipAddress'], '1.2.3.4')
self.assertEqual(instance['dnsName'], '1.2.3.4')
self.assertEqual(instance['tagSet'], [])
self.assertEqual(instance['privateDnsName'], 'server-4321')
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertEqual(instance['dnsNameV6'],
'fe80:b33f::a8bb:ccff:fedd:eeff')
self.assertEqual(instance['clientToken'], 'client-token-2')
# A filter with even one invalid id should cause an exception to be
# raised
self.assertRaises(exception.InstanceNotFound,
self.cloud.describe_instances, self.context,
instance_id=[instance_id, '435679'])
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instances_all_invalid(self):
# Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
instance_id = ec2utils.id_to_ec2_inst_id('435679')
self.assertRaises(exception.InstanceNotFound,
self.cloud.describe_instances, self.context,
instance_id=[instance_id])
def test_describe_instances_with_filters(self):
# Makes sure describe_instances works and filters results.
filters = {'filter': [{'name': 'test',
'value': ['a', 'b']},
{'name': 'another_test',
'value': 'a string'}]}
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
result = self.cloud.describe_instances(self.context, **filters)
self.assertEqual(result, {'reservationSet': []})
def test_describe_instances_with_filters_tags(self):
# Makes sure describe_instances works and filters tag results.
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
utc = iso8601.iso8601.Utc()
flavor = flavors.get_flavor(1)
# Create some test images
sys_meta = flavors.save_flavor_info(
{}, flavor)
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': flavor.id,
'host': 'host1',
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1,
tzinfo=utc),
'system_metadata': sys_meta
}
inst2_kwargs = {
'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': flavor.id,
'host': 'host2',
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2,
tzinfo=utc),
'system_metadata': sys_meta
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
inst2 = db.instance_create(self.context, inst2_kwargs)
ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
# Create some tags
# We get one overlapping pair, one overlapping key, and a
# disparate pair
# inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
# inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}
md = {'key': 'foo', 'value': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
tag=[md])
md2 = {'key': 'baz', 'value': 'wibble'}
md3 = {'key': 'bax', 'value': 'wobble'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1],
tag=[md2, md3])
md4 = {'key': 'baz', 'value': 'quux'}
md5 = {'key': 'zog', 'value': 'bobble'}
self.cloud.create_tags(self.context, resource_id=[ec2_id2],
tag=[md4, md5])
# We should be able to search by:
inst1_ret = {
'groupSet': None,
'instancesSet': [{'amiLaunchIndex': None,
'dnsName': '1.2.3.4',
'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
'imageId': 'ami-00000001',
'instanceId': 'i-00000001',
'instanceState': {'code': 16,
'name': 'running'},
'instanceType': flavor.name,
'ipAddress': '1.2.3.4',
'keyName': 'None (None, host1)',
'launchTime':
datetime.datetime(2012, 5, 1, 1, 1, 1,
tzinfo=utc),
'placement': {
'availabilityZone': 'nova'},
'privateDnsName': u'server-1111',
'privateIpAddress': '192.168.0.3',
'productCodesSet': None,
'rootDeviceName': '/dev/sda1',
'rootDeviceType': 'instance-store',
'tagSet': [{'key': u'foo',
'value': u'bar'},
{'key': u'baz',
'value': u'wibble'},
{'key': u'bax',
'value': u'wobble'}]}],
'ownerId': None,
'reservationId': u'a'}
inst2_ret = {
'groupSet': None,
'instancesSet': [{'amiLaunchIndex': None,
'dnsName': '1.2.3.4',
'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
'imageId': 'ami-00000001',
'instanceId': 'i-00000002',
'instanceState': {'code': 16,
'name': 'running'},
'instanceType': flavor.name,
'ipAddress': '1.2.3.4',
'keyName': u'None (None, host2)',
'launchTime':
datetime.datetime(2012, 5, 1, 1, 1, 2,
tzinfo=utc),
'placement': {
'availabilityZone': 'nova'},
'privateDnsName': u'server-1112',
'privateIpAddress': '192.168.0.3',
'productCodesSet': None,
'rootDeviceName': '/dev/sda1',
'rootDeviceType': 'instance-store',
'tagSet': [{'key': u'foo',
'value': u'bar'},
{'key': u'baz',
'value': u'quux'},
{'key': u'zog',
'value': u'bobble'}]}],
'ownerId': None,
'reservationId': u'b'}
# No filter
result = self.cloud.describe_instances(self.context)
self.assertJsonEqual(result, {'reservationSet':
[inst1_ret, inst2_ret]})
# Key search
# Both should have tags with key 'foo' and value 'bar'
filters = {'filter': [{'name': 'tag:foo',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet':
[inst1_ret, inst2_ret]})
# Both should have tags with key 'foo'
filters = {'filter': [{'name': 'tag-key',
'value': ['foo']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet':
[inst1_ret, inst2_ret]})
# Value search
# Only inst2 should have tags with key 'baz' and value 'quux'
filters = {'filter': [{'name': 'tag:baz',
'value': ['quux']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet': [inst2_ret]})
# Only inst2 should have tags with value 'quux'
filters = {'filter': [{'name': 'tag-value',
'value': ['quux']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet': [inst2_ret]})
# Multiple values
# Both should have tags with key 'baz' and values in the set
# ['quux', 'wibble']
filters = {'filter': [{'name': 'tag:baz',
'value': ['quux', 'wibble']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet':
[inst1_ret, inst2_ret]})
# Both should have tags with key 'baz' or tags with value 'bar'
filters = {'filter': [{'name': 'tag-key',
'value': ['baz']},
{'name': 'tag-value',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet':
[inst1_ret, inst2_ret]})
# Confirm deletion of tags
# Check for format 'tag:'
self.cloud.delete_tags(self.context, resource_id=[ec2_id1], tag=[md])
filters = {'filter': [{'name': 'tag:foo',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet': [inst2_ret]})
# Check for format 'tag-'
filters = {'filter': [{'name': 'tag-key',
'value': ['foo']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet': [inst2_ret]})
filters = {'filter': [{'name': 'tag-value',
'value': ['bar']}]}
result = self.cloud.describe_instances(self.context, **filters)
self.assertJsonEqual(result, {'reservationSet': [inst2_ret]})
# destroy the test instances
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
def test_describe_instances_sorting(self):
# Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst_base = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'system_metadata': sys_meta,
}
utc = iso8601.iso8601.Utc()
inst1_kwargs = {}
inst1_kwargs.update(inst_base)
inst1_kwargs['host'] = 'host1'
inst1_kwargs['hostname'] = 'server-1111'
inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1,
tzinfo=utc)
inst1 = db.instance_create(self.context, inst1_kwargs)
inst2_kwargs = {}
inst2_kwargs.update(inst_base)
inst2_kwargs['host'] = 'host2'
inst2_kwargs['hostname'] = 'server-2222'
inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1,
tzinfo=utc)
inst2 = db.instance_create(self.context, inst2_kwargs)
inst3_kwargs = {}
inst3_kwargs.update(inst_base)
inst3_kwargs['host'] = 'host3'
inst3_kwargs['hostname'] = 'server-3333'
inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1,
tzinfo=utc)
inst3 = db.instance_create(self.context, inst3_kwargs)
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
comp2 = db.service_create(self.context, {'host': 'host2',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]['instancesSet']
self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])
db.instance_destroy(self.context, inst1['uuid'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst3['uuid'])
db.service_destroy(self.context, comp1['id'])
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
# Makes sure describe_instances for instanceState works.
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
values = values or {}
values.update({'image_ref': image_uuid, 'instance_type_id': 1,
'power_state': power_state_, 'vm_state': vm_state_,
'system_metadata': sys_meta})
inst = db.instance_create(self.context, values)
instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
result = result['instancesSet'][0]['instanceState']
name = result['name']
code = result['code']
self.assertEqual(code, expected_code)
self.assertEqual(name, expected_name)
db.instance_destroy(self.context, inst['uuid'])
test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
power_state.RUNNING, vm_states.ACTIVE)
test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
power_state.NOSTATE, vm_states.STOPPED,
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
# Makes sure describe_instances w/ no ipv6 works.
self.flags(use_ipv6=False)
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
comp1 = db.service_create(self.context, {'host': 'host1',
'topic': "compute"})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
instance = result['instancesSet'][0]
instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
self.assertEqual(instance['instanceId'], instance_id)
self.assertEqual(instance['ipAddress'], '1.2.3.4')
self.assertEqual(instance['dnsName'], '1.2.3.4')
self.assertEqual(instance['privateDnsName'], 'server-1234')
self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
self.assertNotIn('dnsNameV6', instance)
db.instance_destroy(self.context, inst1['uuid'])
db.service_destroy(self.context, comp1['id'])
def test_describe_instances_deleted(self):
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst1 = db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
inst2 = db.instance_create(self.context, args2)
db.instance_destroy(self.context, inst1['uuid'])
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 1)
result1 = result['reservationSet'][0]['instancesSet']
self.assertEqual(result1[0]['instanceId'],
ec2utils.id_to_ec2_inst_id(inst2['uuid']))
def test_describe_instances_with_image_deleted(self):
image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
args1 = {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
db.instance_create(self.context, args1)
args2 = {'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'vm_state': 'active',
'system_metadata': sys_meta}
db.instance_create(self.context, args2)
result = self.cloud.describe_instances(self.context)
self.assertEqual(len(result['reservationSet']), 2)
def test_describe_instances_dnsName_set(self):
        # Verifies dnsName is not set when no floating IP is associated.
self._stub_instance_get_with_fixed_ips('get_all', get_floating=False)
self._stub_instance_get_with_fixed_ips('get', get_floating=False)
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
db.instance_create(self.context, {'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'host': 'host1',
'hostname': 'server-1234',
'vm_state': 'active',
'system_metadata': sys_meta})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
instance = result['instancesSet'][0]
self.assertIsNone(instance['dnsName'])
def test_describe_instances_booting_from_a_volume(self):
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor(1))
inst = objects.Instance(self.context)
inst.reservation_id = 'a'
inst.image_ref = ''
inst.root_device_name = '/dev/sdh'
inst.instance_type_id = 1
inst.vm_state = vm_states.ACTIVE
inst.host = 'host1'
inst.system_metadata = sys_meta
inst.create()
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
instance = result['instancesSet'][0]
self.assertIsNone(instance['imageId'])
def test_describe_images(self):
describe_images = self.cloud.describe_images
def fake_detail(meh, context, **kwargs):
return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}}]
def fake_show_none(meh, context, id):
raise exception.ImageNotFound(image_id='bad_image_id')
def fake_detail_none(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
# list all
result1 = describe_images(self.context)
result1 = result1['imagesSet'][0]
self.assertEqual(result1['imageId'], 'ami-00000001')
# provided a valid image_id
result2 = describe_images(self.context, ['ami-00000001'])
self.assertEqual(1, len(result2['imagesSet']))
# provide more than 1 valid image_id
result3 = describe_images(self.context, ['ami-00000001',
'ami-00000002'])
self.assertEqual(2, len(result3['imagesSet']))
# provide a non-existing image_id
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
def assertDictListUnorderedMatch(self, L1, L2, key):
self.assertEqual(len(L1), len(L2))
for d1 in L1:
self.assertIn(key, d1)
for d2 in L2:
self.assertIn(key, d2)
if d1[key] == d2[key]:
self.assertThat(d1, matchers.DictMatches(d2))
def _setUpImageSet(self, create_volumes_and_snapshots=False):
self.flags(max_local_block_devices=-1)
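        # A negative max_local_block_devices lifts the limit so the many
        # ephemeral and swap mappings below are accepted.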
mappings1 = [
{'device': '/dev/sda1', 'virtual': 'root'},
{'device': 'sdb0', 'virtual': 'ephemeral0'},
{'device': 'sdb1', 'virtual': 'ephemeral1'},
{'device': 'sdb2', 'virtual': 'ephemeral2'},
{'device': 'sdb3', 'virtual': 'ephemeral3'},
{'device': 'sdb4', 'virtual': 'ephemeral4'},
{'device': 'sdc0', 'virtual': 'swap'},
{'device': 'sdc1', 'virtual': 'swap'},
{'device': 'sdc2', 'virtual': 'swap'},
{'device': 'sdc3', 'virtual': 'swap'},
{'device': 'sdc4', 'virtual': 'swap'}]
block_device_mapping1 = [
{'device_name': '/dev/sdb1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
{'device_name': '/dev/sdb2',
'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
{'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
{'device_name': '/dev/sdb4', 'no_device': True},
{'device_name': '/dev/sdc1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
{'device_name': '/dev/sdc2',
'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
{'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
{'device_name': '/dev/sdc4', 'no_device': True}]
image1 = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine',
'image_state': 'available',
'mappings': mappings1,
'block_device_mapping': block_device_mapping1,
}
}
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7',
'volume_id': None}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'type': 'machine',
'root_device_name': '/dev/sdb1',
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
def fake_show(meh, context, image_id, **kwargs):
_images = [copy.deepcopy(image1), copy.deepcopy(image2)]
for i in _images:
if str(i['id']) == str(image_id):
return i
raise exception.ImageNotFound(image_id=image_id)
def fake_detail(meh, context, **kwargs):
return [copy.deepcopy(image1), copy.deepcopy(image2)]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
volumes = []
snapshots = []
if create_volumes_and_snapshots:
for bdm in block_device_mapping1:
if 'volume_id' in bdm:
vol = self._volume_create(bdm['volume_id'])
volumes.append(vol['id'])
if 'snapshot_id' in bdm:
snap = self._snapshot_create(bdm['snapshot_id'])
snapshots.append(snap['id'])
return (volumes, snapshots)
def _assertImageSet(self, result, root_device_type, root_device_name):
self.assertEqual(1, len(result['imagesSet']))
result = result['imagesSet'][0]
self.assertIn('rootDeviceType', result)
self.assertEqual(result['rootDeviceType'], root_device_type)
self.assertIn('rootDeviceName', result)
self.assertEqual(result['rootDeviceName'], root_device_name)
self.assertIn('blockDeviceMapping', result)
return result
_expected_root_device_name1 = '/dev/sda1'
    # NOTE(yamahata): noDevice doesn't make sense when returning a mapping.
    #                 It only makes sense when the user is overriding an
    #                 existing mapping.
_expected_bdms1 = [
{'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
{'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
'snap-00000001'}},
{'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
'vol-00000001'}},
{'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
# {'deviceName': '/dev/sdb4', 'noDevice': True},
{'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
{'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
'snap-00000002'}},
{'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
'vol-00000002'}},
{'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
# {'deviceName': '/dev/sdc4', 'noDevice': True}
]
_expected_root_device_name2 = '/dev/sdb1'
_expected_bdms2 = [{'deviceName': '/dev/sdb1',
'ebs': {'snapshotId': 'snap-00000003'}}]
# NOTE(yamahata):
# InstanceBlockDeviceMappingItemType
# rootDeviceType
# rootDeviceName
# blockDeviceMapping
# deviceName
# virtualName
# ebs
# snapshotId
# volumeSize
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
# test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
result = describe_images(self.context, ['ami-00000001'])
result = self._assertImageSet(result, 'instance-store',
self._expected_root_device_name1)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_images(self.context, ['ami-00000002'])
result = self._assertImageSet(result, 'ebs',
self._expected_root_device_name2)
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
def fake_show(meh, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'is_public': True}
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
result = describe_image_attribute(self.context, 'ami-00000001',
'launchPermission')
self.assertEqual([{'group': 'all'}], result['launchPermission'])
result = describe_image_attribute(self.context, 'ami-00000001',
'kernel')
self.assertEqual('aki-00000001', result['kernel']['value'])
result = describe_image_attribute(self.context, 'ami-00000001',
'ramdisk')
self.assertEqual('ari-00000001', result['ramdisk']['value'])
def test_describe_image_attribute_root_device_name(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name1)
result = describe_image_attribute(self.context, 'ami-00000002',
'rootDeviceName')
self.assertEqual(result['rootDeviceName'],
self._expected_root_device_name2)
def test_describe_image_attribute_block_device_mapping(self):
describe_image_attribute = self.cloud.describe_image_attribute
self._setUpImageSet()
result = describe_image_attribute(self.context, 'ami-00000001',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms1, 'deviceName')
result = describe_image_attribute(self.context, 'ami-00000002',
'blockDeviceMapping')
self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
self._expected_bdms2, 'deviceName')
def test_modify_image_attribute(self):
modify_image_attribute = self.cloud.modify_image_attribute
fake_metadata = {
'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'is_public': False}
def fake_show(meh, context, id, **kwargs):
return copy.deepcopy(fake_metadata)
def fake_detail(self, context, **kwargs):
image = fake_show(None, context, None)
image['name'] = kwargs.get('filters', {}).get('name')
return [image]
def fake_update(meh, context, image_id, metadata, data=None):
self.assertEqual(metadata['properties']['kernel_id'],
fake_metadata['properties']['kernel_id'])
self.assertEqual(metadata['properties']['ramdisk_id'],
fake_metadata['properties']['ramdisk_id'])
self.assertTrue(metadata['is_public'])
image = copy.deepcopy(fake_metadata)
image.update(metadata)
return image
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
self.stubs.Set(fake._FakeImageService, 'update', fake_update)
result = modify_image_attribute(self.context, 'ami-00000001',
'launchPermission', 'add',
user_group=['all'])
self.assertTrue(result['is_public'])
def test_register_image(self):
register_image = self.cloud.register_image
def fake_create(*args, **kwargs):
# NOTE(vish): We are mocking s3 so make sure we have converted
# to ids instead of uuids.
return {'id': 1,
'name': 'fake_name',
'container_format': 'ami',
'properties': {'kernel_id': 1,
'ramdisk_id': 1,
'type': 'machine'
},
'is_public': False
}
self.stubs.Set(s3.S3ImageService, 'create', fake_create)
image_location = 'fake_bucket/fake.img.manifest.xml'
result = register_image(self.context, image_location)
self.assertEqual(result['imageId'], 'ami-00000001')
def test_register_image_empty(self):
register_image = self.cloud.register_image
self.assertRaises(exception.MissingParameter, register_image,
self.context, image_location=None)
def test_register_image_name(self):
register_image = self.cloud.register_image
def fake_create(_self, context, metadata, data=None):
self.assertEqual(metadata['name'], self.expected_name)
metadata['id'] = 1
metadata['container_format'] = 'ami'
metadata['is_public'] = False
return metadata
self.stubs.Set(s3.S3ImageService, 'create', fake_create)
self.expected_name = 'fake_bucket/fake.img.manifest.xml'
register_image(self.context,
image_location=self.expected_name,
name=None)
self.expected_name = 'an image name'
register_image(self.context,
image_location='some_location',
name=self.expected_name)
def test_format_image(self):
image = {
'id': 1,
'container_format': 'ami',
'name': 'name',
'owner': 'someone',
'properties': {
'image_location': 'location',
'kernel_id': 1,
'ramdisk_id': 1,
'type': 'machine'},
'is_public': False}
expected = {'name': 'name',
'imageOwnerId': 'someone',
'isPublic': False,
'imageId': 'ami-00000001',
'imageState': None,
'rootDeviceType': 'instance-store',
'architecture': None,
'imageLocation': 'location',
'kernelId': 'aki-00000001',
'ramdiskId': 'ari-00000001',
'rootDeviceName': '/dev/sda1',
'imageType': 'machine',
'description': None}
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
image['properties']['image_location'] = None
expected['imageLocation'] = 'None (name)'
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
image['name'] = None
image['properties']['image_location'] = 'location'
expected['imageLocation'] = 'location'
expected['name'] = 'location'
result = self.cloud._format_image(image)
self.assertThat(result, matchers.DictMatches(expected))
def test_deregister_image(self):
deregister_image = self.cloud.deregister_image
def fake_delete(self, context, id):
return None
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
# valid image
result = deregister_image(self.context, 'ami-00000001')
self.assertTrue(result)
# invalid image
self.stubs.UnsetAll()
def fake_detail_empty(self, context, **kwargs):
return []
self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
def test_deregister_image_wrong_container_type(self):
deregister_image = self.cloud.deregister_image
def fake_delete(self, context, id):
return None
self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
self.assertRaises(exception.NotFound, deregister_image, self.context,
'aki-00000001')
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
def test_get_password_data(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=CONF.default_flavor,
max_count=1)
self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
output = self.cloud.get_password_data(context=self.context,
instance_id=[instance_id])
self.assertEqual(output['passwordData'], 'fakepass')
self.cloud.terminate_instances(self.context, [instance_id])
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
instance_type=CONF.default_flavor,
max_count=1)
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEqual(base64.b64decode(output['output']),
'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
self.cloud.terminate_instances(self.context, [instance_id])
def test_key_generation(self):
result, private_key = self._create_key('test')
expected = db.key_pair_get(self.context,
self.context.user_id,
'test')['public_key']
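        # Re-derive the public key from the generated private key with
        # ssh-keygen and compare it against the stored public key.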
(fd, fname) = tempfile.mkstemp()
os.write(fd, private_key)
public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
os.unlink(fname)
# assert key fields are equal
self.assertEqual(''.join(public_key.split("\n")[2:-2]),
expected.split(" ")[1].strip())
def test_describe_key_pairs(self):
self._create_key('test1')
self._create_key('test2')
result = self.cloud.describe_key_pairs(self.context)
keys = result["keySet"]
        self.assertTrue(any(k['keyName'] == 'test1' for k in keys))
        self.assertTrue(any(k['keyName'] == 'test2' for k in keys))
def test_describe_bad_key_pairs(self):
self.assertRaises(exception.KeypairNotFound,
self.cloud.describe_key_pairs, self.context,
key_name=['DoesNotExist'])
def test_import_key_pair(self):
pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
with open(pubkey_path + '/dummy.pub') as f:
dummypub = f.readline().rstrip()
with open(pubkey_path + '/dummy.fingerprint') as f:
dummyfprint = f.readline().rstrip()
key_name = 'testimportkey'
public_key_material = base64.b64encode(dummypub)
result = self.cloud.import_key_pair(self.context,
key_name,
public_key_material)
self.assertEqual(result['keyName'], key_name)
self.assertEqual(result['keyFingerprint'], dummyfprint)
keydata = db.key_pair_get(self.context,
self.context.user_id,
key_name)
self.assertEqual(dummypub, keydata['public_key'])
self.assertEqual(dummyfprint, keydata['fingerprint'])
def test_import_key_pair_quota_limit(self):
self.flags(quota_key_pairs=0)
pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
        with open(pubkey_path + '/dummy.pub') as f:
            dummypub = f.readline().rstrip()
        with open(pubkey_path + '/dummy.fingerprint') as f:
            f.readline().rstrip()  # fingerprint not needed for the quota check
key_name = 'testimportkey'
public_key_material = base64.b64encode(dummypub)
self.assertRaises(exception.KeypairLimitExceeded,
self.cloud.import_key_pair, self.context, key_name,
public_key_material)
def test_create_key_pair(self):
good_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
bad_names = ('', 'a' * 256, '*', '/')
with mock.patch.object(self.cloud.keypair_api,
'_generate_key_pair') as mock_generate_key_pair:
mock_generate_key_pair.return_value = (
"private_key", "public_key", "fingerprint")
for key_name in good_names:
result = self.cloud.create_key_pair(self.context,
key_name)
self.assertEqual(result['keyName'], key_name)
for key_name in bad_names:
self.assertRaises(exception.InvalidKeypair,
self.cloud.create_key_pair,
self.context,
key_name)
def test_create_key_pair_quota_limit(self):
with mock.patch.object(self.cloud.keypair_api,
'_generate_key_pair') as mock_generate_key_pair:
mock_generate_key_pair.return_value = (
"private_key", "public_key", "fingerprint")
self.flags(quota_key_pairs=10)
for i in range(0, 10):
key_name = 'key_%i' % i
result = self.cloud.create_key_pair(self.context,
key_name)
self.assertEqual(result['keyName'], key_name)
            # The 11th key pair should exceed the quota and fail
self.assertRaises(exception.KeypairLimitExceeded,
self.cloud.create_key_pair,
self.context,
'foo')
def test_delete_key_pair(self):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
def test_run_instances(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
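        # CastAsCall makes RPC casts run synchronously, so the instance is
        # fully built before the assertions below.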
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['imageId'], 'ami-00000001')
self.assertEqual(instance['instanceId'], 'i-00000001')
self.assertEqual(instance['instanceState']['name'], 'running')
self.assertEqual(instance['instanceType'], 'm1.small')
def test_run_instances_invalid_maxcount(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 0}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InvalidInput, run_instances,
self.context, **kwargs)
def test_run_instances_invalid_mincount(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'min_count': 0}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InvalidInput, run_instances,
self.context, **kwargs)
def test_run_instances_invalid_count(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1,
'min_count': 2}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.assertRaises(exception.InvalidInput, run_instances,
self.context, **kwargs)
def test_run_instances_availability_zone(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1,
'placement': {'availability_zone': 'fake'},
}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
def fake_format(*args, **kwargs):
pass
self.stubs.Set(self.cloud, '_format_run_instances', fake_format)
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['availability_zone'], 'fake')
return ({'id': 'fake-instance'}, 'fake-res-id')
self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
        # NOTE(vish): the assert for this call is in the fake_create method.
run_instances(self.context, **kwargs)
def test_empty_reservation_id_from_token(self):
client_token = 'client-token-1'
def fake_get_all_system_metadata(context, search_filts):
reference = [{'key': ['EC2_client_token']},
{'value': ['client-token-1']}]
self.assertEqual(search_filts, reference)
return []
self.stubs.Set(self.cloud.compute_api, 'get_all_system_metadata',
fake_get_all_system_metadata)
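        # No matching system metadata means no prior instance used this
        # client token, so no reservation id should be returned.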
resv_id = self.cloud._resv_id_from_token(self.context, client_token)
self.assertIsNone(resv_id)
def test_run_instances_idempotent(self):
# Ensure subsequent run_instances calls with same client token
# are idempotent and that ones with different client_token are not
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'container_format': 'ami',
'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def dumb(*args, **kwargs):
pass
self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
kwargs['client_token'] = 'client-token-1'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000001')
kwargs['client_token'] = 'client-token-2'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000002')
kwargs['client_token'] = 'client-token-2'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000002')
kwargs['client_token'] = 'client-token-1'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000001')
kwargs['client_token'] = 'client-token-3'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000003')
# make sure terminated instances lose their client tokens
self.cloud.stop_instances(self.context,
instance_id=[instance['instanceId']])
self.cloud.terminate_instances(self.context,
instance_id=[instance['instanceId']])
kwargs['client_token'] = 'client-token-3'
result = run_instances(self.context, **kwargs)
instance = result['instancesSet'][0]
self.assertEqual(instance['instanceId'], 'i-00000004')
def test_run_instances_image_state_none(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_no_state(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}, 'container_format': 'ami'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
self.assertRaises(exception.ImageNotActive, run_instances,
self.context, **kwargs)
def test_run_instances_image_state_invalid(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_decrypt(self, context, id):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'status': 'active',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
self.assertRaises(exception.ImageNotActive, run_instances,
self.context, **kwargs)
def test_run_instances_image_status_active(self):
kwargs = {'image_id': 'ami-00000001',
'instance_type': CONF.default_flavor,
'max_count': 1}
run_instances = self.cloud.run_instances
def fake_show_stat_active(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
'properties': {
'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'},
'status': 'active'}
def fake_id_to_glance_id(context, id):
return 'cedef40a-ed67-4d10-800e-17455edce175'
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
result = run_instances(self.context, **kwargs)
self.assertEqual(len(result['instancesSet']), 1)
def _restart_compute_service(self, periodic_interval_max=None):
"""restart compute service. NOTE: fake driver forgets all instances."""
self.compute.kill()
if periodic_interval_max:
self.compute = self.start_service(
'compute', periodic_interval_max=periodic_interval_max)
else:
self.compute = self.start_service('compute')
def test_stop_start_instance(self):
# Makes sure stop/start instance works.
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 64,
'name': 'stopping'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 0,
'name': 'pending'}}]}
result = self.cloud.start_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 64,
'name': 'stopping'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 32,
'name': 'shutting-down'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
def test_start_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 0,
'name': 'pending'}}]}
result = self.cloud.start_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 32,
'name': 'shutting-down'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_start_instances_policy_failed(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
rules = {
"compute:start":
common_policy.parse_rule("project_id:non_fake"),
}
policy.set_rules(rules)
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.start_instances,
self.context, [instance_id])
self.assertIn("compute:start", exc.format_message())
self._restart_compute_service()
def test_stop_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 64,
'name': 'stopping'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 32,
'name': 'shutting-down'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_stop_instances_policy_failed(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
rules = {
"compute:stop":
common_policy.parse_rule("project_id:non_fake")
}
policy.set_rules(rules)
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.cloud.stop_instances,
self.context, [instance_id])
self.assertIn("compute:stop", exc.format_message())
self._restart_compute_service()
def test_terminate_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 32,
'name': 'shutting-down'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_terminate_instances_invalid_instance_id(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
self._run_instance(**kwargs)
self.assertRaises(exception.InstanceNotFound,
self.cloud.terminate_instances,
self.context, ['i-2'])
self._restart_compute_service()
def test_terminate_instances_disable_terminate(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
ec2utils.ec2_id_to_id(instance_id))
db.instance_update(self.context, internal_uuid,
{'disable_terminate': True})
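        # With disable_terminate set, terminate is a no-op and the instance
        # should stay running.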
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 16,
'name': 'running'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
db.instance_update(self.context, internal_uuid,
{'disable_terminate': False})
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 32,
'name': 'shutting-down'}}]}
result = self.cloud.terminate_instances(self.context, [instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_terminate_instances_two_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
inst1 = self._run_instance(**kwargs)
inst2 = self._run_instance(**kwargs)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 64,
'name': 'stopping'}}]}
result = self.cloud.stop_instances(self.context, [inst1])
self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
'previousState': {'code': 80,
'name': 'stopped'},
'currentState': {'code': 32,
'name': 'shutting-down'}},
{'instanceId': 'i-00000002',
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 32,
'name': 'shutting-down'}}]}
result = self.cloud.terminate_instances(self.context, [inst1, inst2])
self.assertEqual(result, expected)
self._restart_compute_service()
def test_reboot_instances(self):
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
# a running instance can't be started.
self.assertRaises(exception.InstanceInvalidState,
self.cloud.start_instances,
self.context, [instance_id])
result = self.cloud.reboot_instances(self.context, [instance_id])
self.assertTrue(result)
def _volume_create(self, volume_id=None):
kwargs = {'name': 'test-volume',
'description': 'test volume description',
'status': 'available',
'host': 'fake',
'size': 1,
'attach_status': 'detached'}
if volume_id:
kwargs['volume_id'] = volume_id
return self.volume_api.create_with_kwargs(self.context, **kwargs)
def _snapshot_create(self, snapshot_id=None):
kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
'status': "available",
'volume_size': 1}
if snapshot_id:
kwargs['snap_id'] = snapshot_id
return self.volume_api.create_snapshot_with_kwargs(self.context,
**kwargs)
def _create_snapshot(self, ec2_volume_id):
result = self.cloud.create_snapshot(self.context,
volume_id=ec2_volume_id)
return result['snapshotId']
def _do_test_create_image(self, no_reboot):
"""Make sure that CreateImage works."""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
def fake_show(meh, context, id, **kwargs):
bdm = [dict(snapshot_id=snapshots[0],
volume_size=1,
device_name='sda1',
delete_on_termination=False)]
props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
root_device_name='/dev/sda1',
block_device_mapping=bdm)
return dict(id=id,
properties=props,
container_format='ami',
status='active',
is_public=True)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': volumes[0],
'snapshot_id': snapshots[0],
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'sda1',
'boot_index': 0,
'delete_on_termination': False,
'connection_info': '{"foo":"bar"}',
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
virt_driver = {}
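        # Track whether the fake virt driver was power-cycled so the
        # no_reboot behavior can be asserted after create_image.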
def fake_power_on(self, context, instance, network_info,
block_device_info):
virt_driver['powered_on'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
def fake_power_off(self, instance,
shutdown_timeout, shutdown_attempts):
virt_driver['powered_off'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
result = self.cloud.create_image(self.context, ec2_instance_id,
no_reboot=no_reboot)
ec2_ids = [result['imageId']]
created_image = self.cloud.describe_images(self.context,
ec2_ids)['imagesSet'][0]
self.assertIn('blockDeviceMapping', created_image)
bdm = created_image['blockDeviceMapping'][0]
self.assertEqual(bdm.get('deviceName'), 'sda1')
self.assertIn('ebs', bdm)
self.assertEqual(bdm['ebs'].get('snapshotId'),
ec2utils.id_to_ec2_snap_id(snapshots[0]))
self.assertEqual(created_image.get('kernelId'), 'aki-00000001')
self.assertEqual(created_image.get('ramdiskId'), 'ari-00000002')
self.assertEqual(created_image.get('rootDeviceType'), 'ebs')
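        # With no_reboot the driver must not have been power-cycled;
        # otherwise it must have been powered off and back on.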
self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
self.cloud.terminate_instances(self.context, [ec2_instance_id])
self._restart_compute_service()
def test_create_image_no_reboot(self):
# Make sure that CreateImage works.
self._do_test_create_image(True)
def test_create_image_with_reboot(self):
# Make sure that CreateImage works.
self._do_test_create_image(False)
def test_create_image_instance_store(self):
"""Ensure CreateImage fails as expected for an instance-store-backed
instance
"""
        # Force periodic tasks to run frequently so the test does not wait 60s.
self._restart_compute_service(periodic_interval_max=0.3)
(volumes, snapshots) = self._setUpImageSet(
create_volumes_and_snapshots=True)
kwargs = {'image_id': 'ami-1',
'instance_type': CONF.default_flavor,
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': volumes[0],
'snapshot_id': snapshots[0],
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'delete_on_termination': False,
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
self.assertRaises(exception.InvalidParameterValue,
self.cloud.create_image,
self.context,
ec2_instance_id,
no_reboot=True)
@staticmethod
def _fake_bdm_get(ctxt, id, use_slave=False):
blockdms = [{'volume_id': 87654321,
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'no_device': None,
'delete_on_termination': True,
'device_name': '/dev/sdh'},
{'volume_id': None,
'snapshot_id': 98765432,
'source_type': 'snapshot',
'destination_type': 'volume',
'no_device': None,
'delete_on_termination': True,
'device_name': '/dev/sdi'},
{'volume_id': None,
'snapshot_id': None,
'no_device': True,
'source_type': 'blank',
'destination_type': None,
'delete_on_termination': None,
'device_name': None},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdb'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
'delete_on_termination': None,
'device_name': '/dev/sdc'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sdd'},
{'volume_id': None,
'snapshot_id': None,
'no_device': None,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': None,
'delete_on_termination': None,
'device_name': '/dev/sd3'},
]
extra = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 0,
'device_type': None,
'disk_bus': None,
'instance_uuid': '',
'image_id': None,
'volume_size': None,
'connection_info': None,
'boot_index': None,
'guest_format': None,
}
for bdm in blockdms:
bdm.update(extra)
return blockdms
def test_describe_instance_attribute(self):
# Make sure that describe_instance_attribute works.
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
self._fake_bdm_get)
def fake_get(ctxt, instance_id, want_objects=False):
self.assertTrue(want_objects)
inst_type = flavors.get_default_flavor()
inst_type['name'] = 'fake_type'
sys_meta = flavors.save_flavor_info({}, inst_type)
secgroups = objects.SecurityGroupList()
secgroups.objects.append(
objects.SecurityGroup(name='fake0'))
secgroups.objects.append(
objects.SecurityGroup(name='fake1'))
instance = objects.Instance(ctxt)
instance.id = 0
instance.uuid = 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
instance.root_device_name = '/dev/sdh'
instance.security_groups = secgroups
instance.vm_state = vm_states.STOPPED
instance.kernel_id = 'cedef40a-ed67-4d10-800e-17455edce175'
instance.ramdisk_id = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
instance.user_data = 'fake-user data'
instance.shutdown_terminate = False
instance.disable_terminate = False
instance.system_metadata = sys_meta
return instance
self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
def fake_ec2_instance_get_by_id(ctxt, int_id):
if int_id == 305419896:
fake_map = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 305419896,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
}
return fake_map
raise exception.InstanceNotFound(instance_id=int_id)
self.stubs.Set(db, 'ec2_instance_get_by_id',
fake_ec2_instance_get_by_id)
get_attribute = functools.partial(
self.cloud.describe_instance_attribute,
self.context, 'i-12345678')
bdm = get_attribute('blockDeviceMapping')
bdm['blockDeviceMapping'].sort()
expected_bdm = {'instance_id': 'i-12345678',
'rootDeviceType': 'ebs',
'blockDeviceMapping': [
{'deviceName': '/dev/sdh',
'ebs': {'status': 'attached',
'deleteOnTermination': True,
'volumeId': 'vol-05397fb1',
'attachTime': '13:56:24'}}]}
expected_bdm['blockDeviceMapping'].sort()
self.assertEqual(bdm, expected_bdm)
groupSet = get_attribute('groupSet')
groupSet['groupSet'].sort()
expected_groupSet = {'instance_id': 'i-12345678',
'groupSet': [{'groupId': 'fake0'},
{'groupId': 'fake1'}]}
expected_groupSet['groupSet'].sort()
self.assertEqual(groupSet, expected_groupSet)
self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
{'instance_id': 'i-12345678',
'instanceInitiatedShutdownBehavior': 'stop'})
self.assertEqual(get_attribute('disableApiTermination'),
{'instance_id': 'i-12345678',
'disableApiTermination': False})
self.assertEqual(get_attribute('instanceType'),
{'instance_id': 'i-12345678',
'instanceType': 'fake_type'})
self.assertEqual(get_attribute('kernel'),
{'instance_id': 'i-12345678',
'kernel': 'aki-00000001'})
self.assertEqual(get_attribute('ramdisk'),
{'instance_id': 'i-12345678',
'ramdisk': 'ari-00000002'})
self.assertEqual(get_attribute('rootDeviceName'),
{'instance_id': 'i-12345678',
'rootDeviceName': '/dev/sdh'})
# NOTE(yamahata): this isn't supported
# get_attribute('sourceDestCheck')
self.assertEqual(get_attribute('userData'),
{'instance_id': 'i-12345678',
'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
def test_instance_initiated_shutdown_behavior(self):
def test_dia_iisb(expected_result, **kwargs):
"""test describe_instance_attribute
attribute instance_initiated_shutdown_behavior
"""
kwargs.update({'instance_type': CONF.default_flavor,
'max_count': 1})
instance_id = self._run_instance(**kwargs)
result = self.cloud.describe_instance_attribute(self.context,
instance_id, 'instanceInitiatedShutdownBehavior')
self.assertEqual(result['instanceInitiatedShutdownBehavior'],
expected_result)
expected = {'instancesSet': [
{'instanceId': instance_id,
'previousState': {'code': 16,
'name': 'running'},
'currentState': {'code': 32,
'name': 'shutting-down'}}]}
result = self.cloud.terminate_instances(self.context,
[instance_id])
self.assertEqual(result, expected)
self._restart_compute_service()
test_dia_iisb('stop', image_id='ami-1')
block_device_mapping = [{'device_name': '/dev/vdb',
'virtual_name': 'ephemeral0'}]
test_dia_iisb('stop', image_id='ami-2',
block_device_mapping=block_device_mapping)
def fake_show(self, context, id_, **kwargs):
LOG.debug("id_ %s", id_)
prop = {}
if id_ == 'ami-3':
pass
elif id_ == 'ami-4':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}]}
elif id_ == 'ami-5':
prop = {'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
elif id_ == 'ami-6':
prop = {'mappings': [{'device': 'sdb0',
'virtual': 'ephemeral0'}],
'block_device_mapping':
[{'device_name': '/dev/sdb0',
'virtual_name': 'ephemeral0'}]}
prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'type': 'machine'}
prop_base.update(prop)
return {
'id': id_,
'name': 'fake_name',
'properties': prop_base,
'container_format': 'ami',
'status': 'active'}
# NOTE(yamahata): create ami-3 ... ami-7
        # ami-1 and ami-2 are already created by setUp()
for i in range(3, 8):
db.s3_image_create(self.context, 'ami-%d' % i)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
test_dia_iisb('stop', image_id='ami-3')
test_dia_iisb('stop', image_id='ami-4')
test_dia_iisb('stop', image_id='ami-5')
test_dia_iisb('stop', image_id='ami-6')
test_dia_iisb('terminate', image_id='ami-7',
instance_initiated_shutdown_behavior='terminate')
def test_create_delete_tags(self):
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
# Create a test image
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
# Create some tags
md = {'key': 'foo', 'value': 'bar'}
md_result = {'foo': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id],
tag=[md])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md_result)
self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
# Delete them
self.cloud.delete_tags(self.context, resource_id=[ec2_id],
tag=[{'key': 'foo', 'value': 'bar'}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, {})
self.assertEqual(meta_changes, [{'foo': ['-']}])
def test_describe_tags(self):
# We need to stub network calls
self._stub_instance_get_with_fixed_ips('get_all')
self._stub_instance_get_with_fixed_ips('get')
# We need to stub out the MQ call - it won't succeed. We do want
# to check that the method is called, though
meta_changes = [None]
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
# Create some test images
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
inst1_kwargs = {
'reservation_id': 'a',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1111',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
}
inst2_kwargs = {
'reservation_id': 'b',
'image_ref': image_uuid,
'instance_type_id': 1,
'vm_state': 'active',
'launched_at': timeutils.utcnow(),
'hostname': 'server-1112',
'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
}
inst1 = db.instance_create(self.context, inst1_kwargs)
ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
inst2 = db.instance_create(self.context, inst2_kwargs)
ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
# Create some tags
# We get one overlapping pair, and each has a different key value pair
# inst1 : {'foo': 'bar', 'bax': 'wibble'}
        # inst2 : {'foo': 'bar', 'baz': 'quux'}
md = {'key': 'foo', 'value': 'bar'}
md_result = {'foo': 'bar'}
self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
tag=[md])
self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md_result)
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst2)
self.assertEqual(metadata, md_result)
md2 = {'key': 'baz', 'value': 'quux'}
md2_result = {'baz': 'quux'}
md2_result.update(md_result)
self.cloud.create_tags(self.context, resource_id=[ec2_id2],
tag=[md2])
self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst2)
self.assertEqual(metadata, md2_result)
md3 = {'key': 'bax', 'value': 'wibble'}
md3_result = {'bax': 'wibble'}
md3_result.update(md_result)
self.cloud.create_tags(self.context, resource_id=[ec2_id1],
tag=[md3])
self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
metadata = self.cloud.compute_api.get_instance_metadata(self.context,
inst1)
self.assertEqual(metadata, md3_result)
inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
'resource_type': 'instance', 'value': u'bar'}
inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
'resource_type': 'instance', 'value': u'wibble'}
inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
'resource_type': 'instance', 'value': u'bar'}
inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
'resource_type': 'instance', 'value': u'quux'}
# We should be able to search by:
# No filter
tags = self.cloud.describe_tags(self.context)['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
inst2_key_baz, inst1_key_bax])
# Resource ID
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'resource-id',
'value': [ec2_id1]}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst1_key_bax])
# Resource Type
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'resource-type',
'value': ['instance']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
inst2_key_baz, inst1_key_bax])
# Key, either bare or with wildcards
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['foo']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']}])['tagSet']
self.assertEqualSorted(tags, [inst2_key_baz])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['ba?']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['b*']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
# Value, either bare or with wildcards
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['bar']}])['tagSet']
self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['wi*']}])['tagSet']
self.assertEqual(tags, [inst1_key_bax])
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'value',
'value': ['quu?']}])['tagSet']
self.assertEqual(tags, [inst2_key_baz])
# Multiple values
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz', 'bax']}])['tagSet']
self.assertEqualSorted(tags, [inst2_key_baz, inst1_key_bax])
# Multiple filters (AND): no match
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']},
{'name': 'value',
'value': ['wibble']}])['tagSet']
self.assertEqual(tags, [])
# Multiple filters (AND): match
tags = self.cloud.describe_tags(self.context,
filter=[{'name': 'key',
'value': ['baz']},
{'name': 'value',
'value': ['quux']}])['tagSet']
self.assertEqualSorted(tags, [inst2_key_baz])
        # And we should fail on unsupported resource types
self.assertRaises(exception.InvalidParameterValue,
self.cloud.describe_tags,
self.context,
filter=[{'name': 'resource-type',
'value': ['instance', 'volume']}])
def test_resource_type_from_id(self):
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'i-12345'),
'instance')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'r-12345'),
'reservation')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'vol-12345'),
'volume')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'snap-12345'),
'snapshot')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'ami-12345'),
'image')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'ari-12345'),
'image')
self.assertEqual(
ec2utils.resource_type_from_id(self.context, 'aki-12345'),
'image')
self.assertIsNone(
ec2utils.resource_type_from_id(self.context, 'x-12345'))
@mock.patch.object(ec2utils, 'ec2_vol_id_to_uuid',
side_effect=lambda
ec2_volume_id: uuidutils.generate_uuid())
    def test_detach_volume_unattached_error(self, mock_ec2_vol_id_to_uuid):
# Validates that VolumeUnattached is raised if the volume doesn't
# have an instance_uuid value.
ec2_volume_id = 'vol-987654321'
with mock.patch.object(self.cloud.volume_api, 'get',
side_effect=lambda context, volume_id:
{'id': volume_id}) as mock_get:
self.assertRaises(exception.VolumeUnattached,
self.cloud.detach_volume,
self.context,
ec2_volume_id)
mock_get.assert_called_once_with(self.context, mock.ANY)
mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
class CloudTestCaseNeutronProxy(test.NoDBTestCase):
def setUp(self):
super(CloudTestCaseNeutronProxy, self).setUp()
cfg.CONF.set_override('security_group_api', 'neutron')
self.cloud = cloud.CloudController()
self.original_client = neutronapi.get_client
neutronapi.get_client = test_neutron.get_client
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
def tearDown(self):
neutronapi.get_client = self.original_client
test_neutron.get_client()._reset()
super(CloudTestCaseNeutronProxy, self).tearDown()
def test_describe_security_groups(self):
# Makes sure describe_security_groups works and filters results.
group_name = 'test'
description = 'test'
self.cloud.create_security_group(self.context, group_name,
description)
result = self.cloud.describe_security_groups(self.context)
# NOTE(vish): should have the default group as well
self.assertEqual(len(result['securityGroupInfo']), 2)
result = self.cloud.describe_security_groups(self.context,
group_name=[group_name])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(result['securityGroupInfo'][0]['groupName'],
group_name)
self.cloud.delete_security_group(self.context, group_name)
def test_describe_security_groups_by_id(self):
group_name = 'test'
description = 'test'
self.cloud.create_security_group(self.context, group_name,
description)
neutron = test_neutron.get_client()
# Get id from neutron since cloud.create_security_group
# does not expose it.
search_opts = {'name': group_name}
groups = neutron.list_security_groups(
**search_opts)['security_groups']
result = self.cloud.describe_security_groups(self.context,
group_id=[groups[0]['id']])
self.assertEqual(len(result['securityGroupInfo']), 1)
self.assertEqual(
result['securityGroupInfo'][0]['groupName'],
group_name)
self.cloud.delete_security_group(self.context, group_name)
def test_create_delete_security_group(self):
descript = 'test description'
create = self.cloud.create_security_group
result = create(self.context, 'testgrp', descript)
group_descript = result['securityGroupSet'][0]['groupDescription']
self.assertEqual(descript, group_descript)
delete = self.cloud.delete_security_group
self.assertTrue(delete(self.context, 'testgrp'))
class FormatMappingTestCase(test.NoDBTestCase):
def test_format_mapping(self):
id_to_ec2_snap_id_map = {
'993b31ac-452e-4fed-b745-7718385f1811': 'snap-00000001',
'b409a2de-1c79-46bf-aa7e-ebdb4bf427ef': 'snap-00000002',
}
properties = {'block_device_mapping':
[{'guest_format': None, 'boot_index': 0,
'no_device': None, 'volume_id': None,
'volume_size': None, 'disk_bus': 'virtio',
'image_id': None, 'source_type': 'snapshot',
'device_type': 'disk',
'snapshot_id': '993b31ac-452e-4fed-b745-7718385f1811',
'destination_type': 'volume',
'delete_on_termination': None},
{'guest_format': None, 'boot_index': None,
'no_device': None, 'volume_id': None,
'volume_size': None, 'disk_bus': None,
'image_id': None, 'source_type': 'snapshot',
'device_type': None,
'snapshot_id': 'b409a2de-1c79-46bf-aa7e-ebdb4bf427ef',
'destination_type': 'volume',
'delete_on_termination': None}],
'checksum': '50bdc35edb03a38d91b1b071afb20a3c',
'min_ram': '0', 'disk_format': 'qcow2',
'image_name': 'cirros-0.3.0-x86_64-disk', 'bdm_v2': 'True',
'image_id': '4fce9db9-d89e-4eea-8d20-e2bae15292c1',
'root_device_name': '/dev/vda', 'container_format': 'bare',
'min_disk': '0', 'size': '9761280'}
result = {'description': None,
'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
'isPublic': False, 'imageId': 'ami-00000002',
'imageState': 'available', 'architecture': None,
'imageLocation': 'None (xb)',
'rootDeviceType': 'instance-store',
'rootDeviceName': '/dev/vda',
'imageType': 'machine', 'name': 'xb'}
expected = {'architecture': None,
'blockDeviceMapping':
[{'ebs': {'snapshotId': 'snap-00000002'}}],
'description': None,
'imageId': 'ami-00000002',
'imageLocation': 'None (xb)',
'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
'imageState': 'available',
'imageType': 'machine',
'isPublic': False,
'name': 'xb',
'rootDeviceName': '/dev/vda',
'rootDeviceType': 'instance-store'}
with mock.patch.object(ec2utils, 'id_to_ec2_snap_id',
lambda id: id_to_ec2_snap_id_map[id]):
cloud._format_mappings(properties, result)
self.assertEqual(expected, result)
| apache-2.0 |
jbk0th/Py4E-Captsone | pagerank/bs4/testing.py | 4 | 27272 | """Helper classes for tests."""
import pickle
import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
Doctype,
SoupStrainer,
)
from bs4.builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
class SoupTest(unittest.TestCase):
@property
def default_builder(self):
return default_builder()
def soup(self, markup, **kwargs):
"""Build a Beautiful Soup object from markup."""
builder = kwargs.pop('builder', self.default_builder)
return BeautifulSoup(markup, builder=builder, **kwargs)
def document_for(self, markup):
"""Turn an HTML fragment into a document.
The details depend on the builder.
"""
return self.default_builder.test_fragment_to_document(markup)
def assertSoupEquals(self, to_parse, compare_parsed_to=None):
builder = self.default_builder
obj = BeautifulSoup(to_parse, builder=builder)
if compare_parsed_to is None:
compare_parsed_to = to_parse
self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
def assertConnectedness(self, element):
"""Ensure that next_element and previous_element are properly
set for all descendants of the given element.
"""
earlier = None
for e in element.descendants:
if earlier:
self.assertEqual(e, earlier.next_element)
self.assertEqual(earlier, e.previous_element)
earlier = e
class HTMLTreeBuilderSmokeTest(object):
"""A basic test of a treebuilder's competence.
Any HTML treebuilder, present or future, should be able to pass
these tests. With invalid markup, there's room for interpretation,
and different parsers can handle it differently. But with the
markup in these tests, there's not much room for interpretation.
"""
def test_pickle_and_unpickle_identity(self):
# Pickling a tree, then unpickling it, yields a tree identical
# to the original.
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.__class__, BeautifulSoup)
self.assertEqual(loaded.decode(), tree.decode())
def assertDoctypeHandled(self, doctype_fragment):
"""Assert that a given doctype string is handled correctly."""
doctype_str, soup = self._document_with_doctype(doctype_fragment)
# Make sure a Doctype object was created.
doctype = soup.contents[0]
self.assertEqual(doctype.__class__, Doctype)
self.assertEqual(doctype, doctype_fragment)
self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
# Make sure that the doctype was correctly associated with the
# parse tree and that the rest of the document parsed.
self.assertEqual(soup.p.contents[0], 'foo')
def _document_with_doctype(self, doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
soup = self.soup(markup)
return doctype, soup
def test_normal_doctypes(self):
"""Make sure normal, everyday HTML doctypes are handled correctly."""
self.assertDoctypeHandled("html")
self.assertDoctypeHandled(
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
self.assertEqual("", doctype.strip())
def test_public_doctype_with_url(self):
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
self.assertDoctypeHandled(doctype)
def test_system_doctype(self):
self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
def test_namespaced_system_doctype(self):
# We can handle a namespaced doctype with a system ID.
self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
def test_namespaced_public_doctype(self):
# Test a namespaced doctype with a public id.
self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
def test_real_xhtml_document(self):
"""A real XHTML document should come out more or less the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8").replace(b"\n", b""),
markup.replace(b"\n", b""))
def test_processing_instruction(self):
markup = b"""<?PITarget PIContent?>"""
soup = self.soup(markup)
self.assertEqual(markup, soup.encode("utf8"))
def test_deepcopy(self):
"""Make sure you can copy the tree builder.
This is important because the builder is part of a
BeautifulSoup object, and we want to be able to copy that.
"""
copy.deepcopy(self.default_builder)
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
soup = self.soup("<p/>")
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), "<p></p>")
def test_unclosed_tags_get_closed(self):
"""A tag that's not closed by the end of the document should be closed.
This applies to all tags except empty-element tags.
"""
self.assertSoupEquals("<p>", "<p></p>")
self.assertSoupEquals("<b>", "<b></b>")
self.assertSoupEquals("<br>", "<br/>")
def test_br_is_always_empty_element_tag(self):
"""A <br> tag is designated as an empty-element tag.
Some parsers treat <br></br> as one <br/> tag, some parsers as
two tags, but it should always be an empty-element tag.
"""
soup = self.soup("<br></br>")
self.assertTrue(soup.br.is_empty_element)
self.assertEqual(str(soup.br), "<br/>")
def test_nested_formatting_elements(self):
self.assertSoupEquals("<em><em></em></em>")
def test_double_head(self):
html = '''<!DOCTYPE html>
<html>
<head>
<title>Ordinary HEAD element test</title>
</head>
<script type="text/javascript">
alert("Help!");
</script>
<body>
Hello, world!
</body>
</html>
'''
soup = self.soup(html)
self.assertEqual("text/javascript", soup.find('script')['type'])
def test_comment(self):
# Comments are represented as Comment objects.
markup = "<p>foo<!--foobar-->baz</p>"
self.assertSoupEquals(markup)
soup = self.soup(markup)
comment = soup.find(text="foobar")
self.assertEqual(comment.__class__, Comment)
# The comment is properly integrated into the tree.
foo = soup.find(text="foo")
self.assertEqual(comment, foo.next_element)
baz = soup.find(text="baz")
self.assertEqual(comment, baz.previous_element)
def test_preserved_whitespace_in_pre_and_textarea(self):
"""Whitespace must be preserved in <pre> and <textarea> tags."""
self.assertSoupEquals("<pre> </pre>")
self.assertSoupEquals("<textarea> woo </textarea>")
def test_nested_inline_elements(self):
"""Inline elements can be nested indefinitely."""
b_tag = "<b>Inside a B tag</b>"
self.assertSoupEquals(b_tag)
nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
self.assertSoupEquals(nested_b_tag)
double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
        self.assertSoupEquals(double_nested_b_tag)
def test_nested_block_level_elements(self):
"""Block elements can be nested."""
soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
blockquote = soup.blockquote
self.assertEqual(blockquote.p.b.string, 'Foo')
self.assertEqual(blockquote.b.string, 'Foo')
def test_correctly_nested_tables(self):
"""One table can go inside another one."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tr><td>Here\'s another table:'
'<table id="2"><tr><td>foo</td></tr></table>'
'</td></tr></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_deeply_nested_multivalued_attribute(self):
# html5lib can set the attributes of the same tag many times
# as it rearranges the tree. This has caused problems with
# multivalued attributes.
markup = '<table><div><div class="css"></div></div></table>'
soup = self.soup(markup)
self.assertEqual(["css"], soup.div.div['class'])
def test_multivalued_attribute_on_html(self):
        # html5lib uses a different API to set the attributes of the
# <html> tag. This has caused problems with multivalued
# attributes.
markup = '<html class="a b"></html>'
soup = self.soup(markup)
self.assertEqual(["a", "b"], soup.html['class'])
def test_angle_brackets_in_attribute_values_are_escaped(self):
self.assertSoupEquals('<a b="<a>"></a>', '<a b="<a>"></a>')
def test_entities_in_attributes_converted_to_unicode(self):
expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
def test_entities_in_text_converted_to_unicode(self):
expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
def test_quot_entity_converted_to_quotation_mark(self):
self.assertSoupEquals("<p>I said "good day!"</p>",
'<p>I said "good day!"</p>')
def test_out_of_range_entity(self):
expect = "\N{REPLACEMENT CHARACTER}"
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
def test_multipart_strings(self):
"Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
self.assertEqual("p", soup.h2.string.next_element.name)
self.assertEqual("p", soup.p.name)
self.assertConnectedness(soup)
def test_head_tag_between_head_and_body(self):
"Prevent recurrence of a bug in the html5lib treebuilder."
content = """<html><head></head>
<link></link>
<body>foo</body>
</html>
"""
soup = self.soup(content)
self.assertNotEqual(None, soup.html.body)
self.assertConnectedness(soup)
def test_multiple_copies_of_a_tag(self):
"Prevent recurrence of a bug in the html5lib treebuilder."
content = """<!DOCTYPE html>
<html>
<body>
<article id="a" >
<div><a href="1"></div>
<footer>
<a href="2"></a>
</footer>
</article>
</body>
</html>
"""
soup = self.soup(content)
self.assertConnectedness(soup.article)
def test_basic_namespaces(self):
"""Parsers don't need to *understand* namespaces, but at the
very least they should not choke on namespaces or lose
data."""
markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
soup = self.soup(markup)
self.assertEqual(markup, soup.encode())
html = soup.html
self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
self.assertEqual(
'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
self.assertEqual(
'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])
def test_multivalued_attribute_value_becomes_list(self):
markup = b'<a class="foo bar">'
soup = self.soup(markup)
self.assertEqual(['foo', 'bar'], soup.a['class'])
#
# Generally speaking, tests below this point are more tests of
# Beautiful Soup than tests of the tree builders. But parsers are
# weird, so we run these tests separately for every tree builder
# to detect any differences between them.
#
def test_can_parse_unicode_document(self):
# A seemingly innocuous document... but it's in Unicode! And
# it contains characters that can't be represented in the
# encoding found in the declaration! The horror!
markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
soup = self.soup(markup)
self.assertEqual('Sacr\xe9 bleu!', soup.body.string)
def test_soupstrainer(self):
"""Parsers should be able to work with SoupStrainers."""
strainer = SoupStrainer("b")
soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
parse_only=strainer)
self.assertEqual(soup.decode(), "<b>bold</b>")
def test_single_quote_attribute_values_become_double_quotes(self):
self.assertSoupEquals("<foo attr='bar'></foo>",
'<foo attr="bar"></foo>')
def test_attribute_values_with_nested_quotes_are_left_alone(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
self.assertSoupEquals(text)
def test_attribute_values_with_double_nested_quotes_get_quoted(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
soup = self.soup(text)
soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
self.assertSoupEquals(
soup.foo.decode(),
"""<foo attr="Brawls happen at "Bob\'s Bar"">a</foo>""")
def test_ampersand_in_attribute_value_gets_escaped(self):
self.assertSoupEquals('<this is="really messed up & stuff"></this>',
'<this is="really messed up & stuff"></this>')
self.assertSoupEquals(
'<a href="http://example.org?a=1&b=2;3">foo</a>',
'<a href="http://example.org?a=1&b=2;3">foo</a>')
def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
self.assertSoupEquals('<a href="http://example.org?a=1&b=2;3"></a>')
def test_entities_in_strings_converted_during_parsing(self):
# Both XML and HTML entities are converted to Unicode characters
# during parsing.
text = "<p><<sacré bleu!>></p>"
expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>"
self.assertSoupEquals(text, expected)
def test_smart_quotes_converted_on_the_way_in(self):
# Microsoft smart quotes are converted to Unicode characters during
# parsing.
quote = b"<p>\x91Foo\x92</p>"
soup = self.soup(quote)
self.assertEqual(
soup.p.string,
"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
soup = self.soup("<a> </a>")
self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2)
def test_entities_converted_on_the_way_out(self):
text = "<p><<sacré bleu!>></p>"
expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>".encode("utf-8")
soup = self.soup(text)
self.assertEqual(soup.p.encode("utf-8"), expected)
def test_real_iso_latin_document(self):
# Smoke test of interrelated functionality, using an
# easy-to-understand document.
# Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
# That's because we're going to encode it into ISO-Latin-1, and use
# that to test.
iso_latin_html = unicode_html.encode("iso-8859-1")
# Parse the ISO-Latin-1 HTML.
soup = self.soup(iso_latin_html)
# Encode it to UTF-8.
result = soup.encode("utf-8")
# What do we expect the result to look like? Well, it would
# look like unicode_html, except that the META tag would say
# UTF-8 instead of ISO-Latin-1.
expected = unicode_html.replace("ISO-Latin-1", "utf-8")
# And, of course, it would be in UTF-8, not Unicode.
expected = expected.encode("utf-8")
# Ta-da!
self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
# Smoke test to make sure the parser can handle a document in
# Shift-JIS encoding, without choking.
shift_jis_html = (
b'<html><head></head><body><pre>'
b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
b'</pre></body></html>')
unicode_html = shift_jis_html.decode("shift-jis")
soup = self.soup(unicode_html)
# Make sure the parse tree is correctly encoded to various
# encodings.
self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
def test_real_hebrew_document(self):
        # A real-world test to make sure we can convert ISO-8859-8 (a
# Hebrew encoding) to UTF-8.
hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
soup = self.soup(
hebrew_document, from_encoding="iso8859-8")
self.assertEqual(soup.original_encoding, 'iso8859-8')
self.assertEqual(
soup.encode('utf-8'),
hebrew_document.decode("iso8859-8").encode("utf-8"))
def test_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
content = parsed_meta['content']
self.assertEqual('text/html; charset=x-sjis', content)
# But that value is actually a ContentMetaAttributeValue object.
self.assertTrue(isinstance(content, ContentMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('text/html; charset=utf8', content.encode("utf8"))
# For the rest of the story, see TestSubstitutions in
# test_tree.py.
def test_html5_style_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta id="encoding" charset="x-sjis" />')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', id="encoding")
charset = parsed_meta['charset']
self.assertEqual('x-sjis', charset)
# But that value is actually a CharsetMetaAttributeValue object.
self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('utf8', charset.encode("utf8"))
def test_tag_with_no_attributes_can_have_attributes_added(self):
data = self.soup("<a>text</a>")
data.a['foo'] = 'bar'
self.assertEqual('<a foo="bar">text</a>', data.a.decode())
class XMLTreeBuilderSmokeTest(object):
def test_pickle_and_unpickle_identity(self):
# Pickling a tree, then unpickling it, yields a tree identical
# to the original.
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
self.assertEqual(loaded.__class__, BeautifulSoup)
self.assertEqual(loaded.decode(), tree.decode())
def test_docstring_generated(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')
def test_real_xhtml_document(self):
"""A real XHTML document should come out *exactly* the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8"), markup)
def test_formatter_processes_script_tag_for_xml_documents(self):
doc = """
<script type="text/javascript">
</script>
"""
soup = BeautifulSoup(doc, "lxml-xml")
# lxml would have stripped this while parsing, but we can add
# it later.
soup.script.string = 'console.log("< < hey > > ");'
encoded = soup.encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_can_parse_unicode_document(self):
markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
soup = self.soup(markup)
self.assertEqual('Sacr\xe9 bleu!', soup.root.string)
def test_popping_namespaced_tag(self):
markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
soup = self.soup(markup)
self.assertEqual(
str(soup.rss), markup)
def test_docstring_includes_correct_encoding(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode("latin1"),
b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
def test_large_xml_document(self):
"""A large XML document should come out the same as it went in."""
markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
+ b'0' * (2**12)
+ b'</root>')
soup = self.soup(markup)
self.assertEqual(soup.encode("utf-8"), markup)
def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
self.assertSoupEquals("<p>", "<p/>")
self.assertSoupEquals("<p>foo</p>")
def test_namespaces_are_preserved(self):
markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
soup = self.soup(markup)
root = soup.root
self.assertEqual("http://example.com/", root['xmlns:a'])
self.assertEqual("http://example.net/", root['xmlns:b'])
def test_closing_namespaced_tag(self):
markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
soup = self.soup(markup)
self.assertEqual(str(soup.p), markup)
def test_namespaced_attributes(self):
markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
soup = self.soup(markup)
self.assertEqual(str(soup.foo), markup)
def test_namespaced_attributes_xml_namespace(self):
markup = '<foo xml:lang="fr">bar</foo>'
soup = self.soup(markup)
self.assertEqual(str(soup.foo), markup)
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
"""Smoke test for a tree builder that supports HTML5."""
def test_real_xhtml_document(self):
# Since XHTML is not HTML5, HTML5 parsers are not tested to handle
# XHTML documents in any particular way.
pass
def test_html_tags_have_namespace(self):
markup = "<a>"
soup = self.soup(markup)
self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
def test_svg_tags_have_namespace(self):
markup = '<svg><circle/></svg>'
soup = self.soup(markup)
namespace = "http://www.w3.org/2000/svg"
self.assertEqual(namespace, soup.svg.namespace)
self.assertEqual(namespace, soup.circle.namespace)
def test_mathml_tags_have_namespace(self):
markup = '<math><msqrt>5</msqrt></math>'
soup = self.soup(markup)
namespace = 'http://www.w3.org/1998/Math/MathML'
self.assertEqual(namespace, soup.math.namespace)
self.assertEqual(namespace, soup.msqrt.namespace)
def test_xml_declaration_becomes_comment(self):
markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
soup = self.soup(markup)
self.assertTrue(isinstance(soup.contents[0], Comment))
self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
self.assertEqual("html", soup.contents[0].next_element.name)
def skipIf(condition, reason):
def nothing(test, *args, **kwargs):
return None
def decorator(test_item):
if condition:
return nothing
else:
return test_item
return decorator
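# Illustrative use of the skipIf helper above (a sketch, not part of the
# module); LXML_PRESENT is a placeholder flag standing in for a real
# availability check:
#
#     LXML_PRESENT = False
#
#     class LXMLOnlyTest(SoupTest):
#         @skipIf(not LXML_PRESENT, "lxml seems not to be present")
#         def test_requires_lxml(self):
#             pass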
| mit |
gjhiggins/fuguecoin | contrib/linearize/linearize-hashes.py | 1 | 4123 | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import unittest
import os
from datetime import datetime, timedelta, tzinfo
import binascii
import json
import subprocess
import re
import requests
import time
from rdflib import (
Namespace,
URIRef,
Graph,
Literal,
)
from rdflib.namespace import RDF, XSD, SKOS # , RDFS
import base64
import sys
settings = dict(
rpcuser="fuguecoinrpcuser",
rpcpassword="fuguecoinrpcuserpassword",
host="127.0.0.1",
port=9088,
rpcport=9089,
min_height=0,
max_height=2374500,
netmagic=0xf9beb4d4,
genesis=0x0000008625534e38467b29078ed2868c2ec43eb024c4a2e7ff2f6480f283656b,
input="/home/gjh/minkiz/fabshop/FugueCoinWork/fuguecoin/datadir/blocks",
output="/home/gjh/minkiz/fabshop/FugueCoinWork/fuguecoin-0.8.6/datadir/bootstrap.dat",
hashlist="hashlist.txt",
split_year=5,
out_of_order_cache_sz = 100000000
)
# Switch endian-ness
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i + 2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
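# Illustrative sanity check for hex_switchEndian (not part of the original
# script); the input below is an arbitrary placeholder showing that hex
# digit pairs are reversed:
#
#     >>> hex_switchEndian('a1b2c3')
#     'c3b2a1'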
class RPCHost(object):
def __init__(self, url):
self._session = requests.Session()
self._url = url
self._headers = {'content-type': 'application/json'}
def call(self, rpcMethod, *params):
payload = json.dumps({"method": rpcMethod, "params": list(params), "jsonrpc": "2.0"})
tries = 10
hadConnectionFailures = False
while True:
# print("{url} {headers} {data}".format(url=self._url, headers=self._headers, data=payload))
try:
response = self._session.get(self._url, headers=self._headers, data=payload)
except requests.exceptions.ConnectionError:
tries -= 1
if tries == 0:
raise Exception('Failed to connect for remote procedure call.')
hadConnectionFailures = True
print("Couldn't connect for remote procedure call, will sleep for ten seconds and then try again ({} more tries)".format(tries))
time.sleep(10)
else:
if hadConnectionFailures:
print('Connected for remote procedure call after retry.')
break
if response.status_code not in (200, 500):
raise Exception('RPC connection failure: ' + str(response.status_code) + ' ' + response.reason)
responseJSON = response.json()
if 'error' in responseJSON and responseJSON['error'] is not None:
raise Exception('Error in RPC call: ' + str(responseJSON['error']))
return responseJSON['result']
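# Illustrative usage sketch for RPCHost (not part of the original tests).
# The URL mirrors the rpcuser/rpcpassword/rpcport convention in `settings`;
# the credentials and arguments below are placeholder values.
#
#     rpc = RPCHost('http://fuguecoinrpcuser:secret@127.0.0.1:9089/')
#     height = rpc.call('getblockcount')
#     genesis_hash = rpc.call('getblockhash', 0)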
class TestMyView(unittest.TestCase):
def setUp(self):
self.serverurl = 'http://{}:{}@localhost:{}/'.format(
settings.get('rpcuser'), settings.get('rpcpassword'), settings.get('rpcport'))
self.amerpc = RPCHost(self.serverurl)
@unittest.skip("Passed, skipping")
def test_get_block_hashes(self):
with open('/home/gjh/minkiz/fabshop/FugueCoinWork/fuguecoin-0.8.6/contrib/linearize/fuguehashes.txt', 'w') as fp:
for height in range(0, 2374500):
res = hex_switchEndian(self.amerpc.call('getblockhash', height))
fp.write("{}\n".format(res))
fp.close()
def test_get_unswitch(self):
with open(
'/home/gjh/minkiz/fabshop/FugueCoinWork/fuguecoin-0.8.6/contrib/linearize/fuguehashes.txt', 'r') as fin, open(
'/home/gjh/minkiz/fabshop/FugueCoinWork/fuguecoin-0.8.6/contrib/linearize/hashlist.txt', 'w') as fout:
for l in fin.readlines():
res = hex_switchEndian(l[:-1])
fout.write("{}\n".format(res))
fout.close()
if __name__ == "__main__":
unittest.main()
| mit |
konstruktoid/ansible-upstream | lib/ansible/plugins/netconf/junos.py | 6 | 5602 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible import constants as C
from ansible.module_utils._text import to_text, to_bytes
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.netconf import NetconfBase
from ansible.plugins.netconf import ensure_connected
try:
from ncclient import manager
from ncclient.operations import RPCError
from ncclient.transport.errors import SSHUnknownHostError
from ncclient.xml_ import to_ele, to_xml, new_ele
except ImportError:
raise AnsibleError("ncclient is not installed")
class Netconf(NetconfBase):
def get_text(self, ele, tag):
try:
return to_text(ele.find(tag).text, errors='surrogate_then_replace').strip()
except AttributeError:
pass
def get_device_info(self):
device_info = dict()
device_info['network_os'] = 'junos'
ele = new_ele('get-software-information')
data = self.execute_rpc(to_xml(ele))
reply = to_ele(data)
sw_info = reply.find('.//software-information')
device_info['network_os_version'] = self.get_text(sw_info, 'junos-version')
device_info['network_os_hostname'] = self.get_text(sw_info, 'host-name')
device_info['network_os_model'] = self.get_text(sw_info, 'product-model')
return device_info
@ensure_connected
def execute_rpc(self, name):
"""RPC to be execute on remote device
:name: Name of rpc in string format"""
return self.rpc(name)
@ensure_connected
def load_configuration(self, *args, **kwargs):
"""Loads given configuration on device
:format: Format of configuration (xml, text, set)
:action: Action to be performed (merge, replace, override, update)
:target: is the name of the configuration datastore being edited
:config: is the configuration in string format."""
if kwargs.get('config'):
kwargs['config'] = to_bytes(kwargs['config'], errors='surrogate_or_strict')
if kwargs.get('format', 'xml') == 'xml':
kwargs['config'] = to_ele(kwargs['config'])
try:
return self.m.load_configuration(*args, **kwargs).data_xml
except RPCError as exc:
raise Exception(to_xml(exc.xml))
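    # Illustrative call of load_configuration (a sketch, not from the Ansible
    # docs); the XML payload and the 'merge' action are placeholder values:
    #
    #     conn.load_configuration(
    #         format='xml', action='merge',
    #         config='<config><system><host-name>r1</host-name></system></config>')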
def get_capabilities(self):
result = dict()
        result['rpc'] = self.get_base_rpc() + ['commit', 'discard_changes', 'validate', 'lock', 'unlock', 'copy_config',
'execute_rpc', 'load_configuration', 'get_configuration', 'command',
'reboot', 'halt']
result['network_api'] = 'netconf'
result['device_info'] = self.get_device_info()
result['server_capabilities'] = [c for c in self.m.server_capabilities]
result['client_capabilities'] = [c for c in self.m.client_capabilities]
result['session_id'] = self.m.session_id
result['device_operations'] = self.get_device_operations(result['server_capabilities'])
return json.dumps(result)
@staticmethod
def guess_network_os(obj):
try:
m = manager.connect(
host=obj._play_context.remote_addr,
port=obj._play_context.port or 830,
username=obj._play_context.remote_user,
password=obj._play_context.password,
key_filename=obj._play_context.private_key_file,
hostkey_verify=C.HOST_KEY_CHECKING,
look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
allow_agent=obj._play_context.allow_agent,
timeout=obj._play_context.timeout
)
except SSHUnknownHostError as exc:
raise AnsibleConnectionFailure(str(exc))
guessed_os = None
for c in m.server_capabilities:
if re.search('junos', c):
guessed_os = 'junos'
m.close_session()
return guessed_os
@ensure_connected
def get_configuration(self, *args, **kwargs):
"""Retrieve all or part of a specified configuration.
:format: format in configuration should be retrieved
:filter: specifies the portion of the configuration to retrieve
(by default entire configuration is retrieved)"""
return self.m.get_configuration(*args, **kwargs).data_xml
@ensure_connected
def compare_configuration(self, *args, **kwargs):
"""Compare configuration
:rollback: rollback id"""
return self.m.compare_configuration(*args, **kwargs).data_xml
@ensure_connected
def halt(self):
"""reboot the device"""
return self.m.halt().data_xml
@ensure_connected
def reboot(self):
"""reboot the device"""
return self.m.reboot().data_xml
| gpl-3.0 |
jakobworldpeace/scikit-learn | sklearn/svm/bounds.py | 36 | 2712 | """Determination of parameter bounds"""
# Author: Paolo Losi
# License: BSD 3 clause
from warnings import warn
import numpy as np
from ..preprocessing import LabelBinarizer
from ..utils.validation import check_consistent_length, check_array
from ..utils.extmath import safe_sparse_dot
def l1_min_c(X, y, loss='squared_hinge', fit_intercept=True,
intercept_scaling=1.0):
"""
Return the lowest bound for C such that for C in (l1_min_C, infinity)
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as LinearSVC with penalty='l1' and
linear_model.LogisticRegression with penalty='l1'.
This value is valid if class_weight parameter in fit() is not set.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector relative to X
loss : {'squared_hinge', 'log'}, default 'squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
'l2' is accepted as an alias for 'squared_hinge', for backward
compatibility reasons, but should not be used in new code.
fit_intercept : bool, default: True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default: 1
when fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
minimum value for C
"""
if loss not in ('squared_hinge', 'log'):
raise ValueError('loss type not in ("squared_hinge", "log", "l2")')
X = check_array(X, accept_sparse='csc')
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = intercept_scaling * np.ones((np.size(y), 1))
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError('Ill-posed l1_min_c calculation: l1 will always '
'select zero coefficients for this data')
if loss == 'squared_hinge':
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
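# A minimal usage sketch (illustrative, not part of scikit-learn): choose C
# slightly above l1_min_c so an L1-penalised model keeps at least one
# non-zero coefficient. The dataset and the 1.1 factor are arbitrary.
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegression
#     from sklearn.svm import l1_min_c
#
#     X, y = load_iris(return_X_y=True)
#     c_min = l1_min_c(X, y, loss='log')
#     clf = LogisticRegression(penalty='l1', C=1.1 * c_min).fit(X, y)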
| bsd-3-clause |
anryko/ansible | lib/ansible/modules/cloud/vmware/_vmware_dns_config.py | 11 | 3905 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_dns_config
short_description: Manage VMware ESXi DNS Configuration
description:
- Manage VMware ESXi DNS Configuration
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
deprecated:
removed_in: '2.14'
why: Will be replaced with new module vmware_host_dns.
alternative: Use M(vmware_host_dns) instead.
options:
change_hostname_to:
description:
- The hostname that an ESXi host should be changed to.
required: True
type: str
domainname:
description:
        - The domain the ESXi host should be a part of.
required: True
type: str
dns_servers:
description:
- The DNS servers that the host should be configured to use.
required: True
type: list
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Configure ESXi hostname and DNS servers
vmware_dns_config:
hostname: '{{ esxi_hostname }}'
username: '{{ esxi_username }}'
password: '{{ esxi_password }}'
change_hostname_to: esx01
domainname: foo.org
dns_servers:
- 8.8.8.8
- 8.8.4.4
delegate_to: localhost
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import HAS_PYVMOMI, connect_to_api, get_all_objs, vmware_argument_spec
def configure_dns(host_system, hostname, domainname, dns_servers):
changed = False
host_config_manager = host_system.configManager
host_network_system = host_config_manager.networkSystem
config = host_network_system.dnsConfig
config.dhcp = False
if config.address != dns_servers:
config.address = dns_servers
changed = True
if config.domainName != domainname:
config.domainName = domainname
changed = True
if config.hostName != hostname:
config.hostName = hostname
changed = True
if changed:
host_network_system.UpdateDnsConfig(config)
return changed
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
domainname=dict(required=True, type='str'),
dns_servers=dict(required=True, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
change_hostname_to = module.params['change_hostname_to']
domainname = module.params['domainname']
dns_servers = module.params['dns_servers']
try:
content = connect_to_api(module)
host = get_all_objs(content, [vim.HostSystem])
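        # get_all_objs returns every HostSystem in the inventory; this module
        # only configures the first one found.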
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = list(host)[0]
changed = configure_dns(host_system, change_hostname_to, domainname, dns_servers)
module.exit_json(changed=changed)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-57/modules/sheets/lib/python2.7/site-packages/pip/_vendor/distlib/scripts.py | 333 | 15224 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
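# Template for generated entry-point wrapper scripts; %(module)s and %(func)s
# are substituted per export entry, and '%%' sequences survive as literal '%'.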
SCRIPT_TEMPLATE = '''# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, re
def _resolve(module, func):
__import__(module)
mod = sys.modules[module]
parts = func.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
try:
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
func = _resolve('%(module)s', '%(func)s')
rc = func() # None interpreted as 0
except Exception as e: # only supporting Python >= 2.6
sys.stderr.write('%%s\\n' %% e)
rc = 1
sys.exit(rc)
'''
def _enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
# instead of "/usr/bin/env /dir with spaces/bin/jython"
        # otherwise the whole command line would end up quoted as one token
if executable.startswith('/usr/bin/env '):
env, _executable = executable.split(' ', 1)
if ' ' in _executable and not _executable.startswith('"'):
executable = '%s "%s"' % (env, _executable)
else:
if not executable.startswith('"'):
executable = '"%s"' % executable
return executable
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java' and
os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
self._is_nt = os.name == 'nt' or (
os.name == 'java' and os._name == 'nt')
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
            # The Jython workaround is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
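        # Pick the shebang interpreter: an explicitly configured executable
        # wins, otherwise fall back to the installed interpreter, a venv's
        # scripts directory, or the build-tree layout.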
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv(): # pragma: no cover
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else: # pragma: no cover
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows
executable = os.path.normcase(executable)
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
executable = _enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp): # pragma: no cover
post_interp += b' -X:Frames'
shebang = b'#!' + executable + post_interp + b'\n'
        # The Python parser reads a script as UTF-8 until it encounters a
        # #coding:xxx cookie. Since the shebang has to be the first line of
        # the file, the cookie cannot appear before it, so the shebang must
        # be decodable from UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and self._is_nt
linesep = os.linesep.encode('utf-8')
if not use_launcher:
script_bytes = shebang + linesep + script_bytes
else: # pragma: no cover
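            # On Windows, wrap the script: zip it as __main__.py and append it
            # to a stock launcher ('t' for console scripts, 'w' for GUI scripts).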
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + linesep + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher: # pragma: no cover
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
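        # The configured variants control which script names are written,
        # e.g. 'foo' and 'foo-3.4' by default, plus 'foo3' if 'X' is enabled.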
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s' % (name, sys.version[:3]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError: # pragma: no cover
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line: # pragma: no cover
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
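            # A '#!...python...' first line means the shebang must be rewritten
            # for the target interpreter; otherwise the script is copied verbatim.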
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line: # pragma: no cover
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
result = finder(distlib_package).find(name).bytes
return result
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
:param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
| gpl-3.0 |
Endika/odoo | addons/account/product.py | 374 | 2897 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_category(osv.osv):
_inherit = "product.category"
_columns = {
'property_account_income_categ': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices to value sales."),
'property_account_expense_categ': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices to value expenses."),
}
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_inherit = "product.template"
_columns = {
'taxes_id': fields.many2many('account.tax', 'product_taxes_rel',
'prod_id', 'tax_id', 'Customer Taxes',
domain=[('parent_id','=',False),('type_tax_use','in',['sale','all'])]),
'supplier_taxes_id': fields.many2many('account.tax',
'product_supplier_taxes_rel', 'prod_id', 'tax_id',
'Supplier Taxes', domain=[('parent_id', '=', False),('type_tax_use','in',['purchase','all'])]),
'property_account_income': fields.property(
type='many2one',
relation='account.account',
string="Income Account",
help="This account will be used for invoices instead of the default one to value sales for the current product."),
'property_account_expense': fields.property(
type='many2one',
relation='account.account',
string="Expense Account",
help="This account will be used for invoices instead of the default one to value expenses for the current product."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openstack/python-congressclient | congressclient/tests/fakes.py | 1 | 2183 | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import six
AUTH_TOKEN = "foobar"
AUTH_URL = "http://0.0.0.0"
class FakeStdout(object):
def __init__(self):
self.content = []
def write(self, text):
self.content.append(text)
def make_string(self):
result = ''
for line in self.content:
result = result + line
return result
class FakeApp(object):
def __init__(self, _stdout):
self.stdout = _stdout
self.client_manager = None
self.stdin = sys.stdin
self.stdout = _stdout or sys.stdout
self.stderr = sys.stderr
self.restapi = None
class FakeClientManager(object):
def __init__(self):
self.compute = None
self.identity = None
self.image = None
self.object = None
self.volume = None
self.network = None
self.auth_ref = None
class FakeModule(object):
def __init__(self, name, version):
self.name = name
self.__version__ = version
class FakeResource(object):
def __init__(self, manager, info, loaded=False):
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def _add_details(self, info):
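        # Mirror the info dict onto the instance so tests can use attribute
        # access just like on real client resource objects.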
for (k, v) in six.iteritems(info):
setattr(self, k, v)
def __repr__(self):
reprkeys = sorted(k for k in self.__dict__.keys() if k[0] != '_' and
k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
| apache-2.0 |