repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses 15 values)
---|---|---|---|---|---
rlr/fjord | vendor/packages/translate-toolkit/translate/filters/spelling.py | 3 | 2125 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
# 2013 F Wolff
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""An API to provide spell checking for use in checks or elsewhere."""
import logging

logger = logging.getLogger(__name__)

available = False

try:
    # Enchant
    from enchant import checker, DictNotFoundError, Error as EnchantError
    available = True

    checkers = {}

    def _get_checker(lang):
        if not lang in checkers:
            try:
                checkers[lang] = checker.SpellChecker(lang)
                # some versions only report an error when checking something
                checkers[lang].check(u'bla')
            except EnchantError, e:
                # sometimes this is raised instead of DictNotFoundError
                logger.error(str(e))
                checkers[lang] = None
        return checkers[lang]

    def check(text, lang):
        spellchecker = _get_checker(lang)
        if not spellchecker:
            return
        spellchecker.set_text(unicode(text))
        for err in spellchecker:
            yield err.word, err.wordpos, err.suggest()

    def simple_check(text, lang):
        spellchecker = _get_checker(lang)
        if not spellchecker:
            return
        spellchecker.set_text(unicode(text))
        for err in spellchecker:
            yield err.word

except ImportError:

    def check(text, lang):
        return []

    def simple_check(text, lang):
        return []
| bsd-3-clause |
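# A minimal usage sketch for the spelling module above (assumptions: Python 2,
# translate-toolkit importable as `translate.filters.spelling`, and an Enchant
# dictionary installed for the requested language):
from translate.filters import spelling

# misspelled words with their positions and suggested corrections
for word, pos, suggestions in spelling.check(u"Ths sentence has an eror", "en"):
    print word, pos, suggestions

# or only the misspelled words themselves
print list(spelling.simple_check(u"Ths sentence has an eror", "en"))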
cryptickp/heat | heat/tests/clients/test_keystone_client.py | 6 | 25062 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from keystoneclient import exceptions as keystone_exceptions
from heat.common import exception
from heat.engine.clients.os import keystone as client
from heat.tests import common
class KeystoneRoleConstraintTest(common.HeatTestCase):
def test_expected_exceptions(self):
self.assertEqual((exception.EntityNotFound,),
client.KeystoneRoleConstraint.expected_exceptions,
"KeystoneRoleConstraint expected exceptions error")
def test_constrain(self):
constrain = client.KeystoneRoleConstraint()
client_mock = mock.MagicMock()
client_plugin_mock = mock.MagicMock()
client_plugin_mock.get_role_id.return_value = None
client_mock.client_plugin.return_value = client_plugin_mock
self.assertIsNone(constrain.validate_with_client(client_mock,
'role_1'))
client_plugin_mock.get_role_id.assert_called_once_with('role_1')
class KeystoneProjectConstraintTest(common.HeatTestCase):
def test_expected_exceptions(self):
self.assertEqual((exception.EntityNotFound,),
client.KeystoneProjectConstraint.expected_exceptions,
"KeystoneProjectConstraint expected exceptions error")
def test_constrain(self):
constrain = client.KeystoneProjectConstraint()
client_mock = mock.MagicMock()
client_plugin_mock = mock.MagicMock()
client_plugin_mock.get_project_id.return_value = None
client_mock.client_plugin.return_value = client_plugin_mock
self.assertIsNone(constrain.validate_with_client(client_mock,
'project_1'))
client_plugin_mock.get_project_id.assert_called_once_with('project_1')
class KeystoneGroupConstraintTest(common.HeatTestCase):
def test_expected_exceptions(self):
self.assertEqual((exception.EntityNotFound,),
client.KeystoneGroupConstraint.expected_exceptions,
"KeystoneGroupConstraint expected exceptions error")
def test_constrain(self):
constrain = client.KeystoneGroupConstraint()
client_mock = mock.MagicMock()
client_plugin_mock = mock.MagicMock()
client_plugin_mock.get_group_id.return_value = None
client_mock.client_plugin.return_value = client_plugin_mock
self.assertIsNone(constrain.validate_with_client(client_mock,
'group_1'))
client_plugin_mock.get_group_id.assert_called_once_with('group_1')
class KeystoneDomainConstraintTest(common.HeatTestCase):
def test_expected_exceptions(self):
self.assertEqual((exception.EntityNotFound,),
client.KeystoneDomainConstraint.expected_exceptions,
"KeystoneDomainConstraint expected exceptions error")
def test_constrain(self):
constrain = client.KeystoneDomainConstraint()
client_mock = mock.MagicMock()
client_plugin_mock = mock.MagicMock()
client_plugin_mock.get_domain_id.return_value = None
client_mock.client_plugin.return_value = client_plugin_mock
self.assertIsNone(constrain.validate_with_client(client_mock,
'domain_1'))
client_plugin_mock.get_domain_id.assert_called_once_with('domain_1')
class KeystoneServiceConstraintTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
def test_expected_exceptions(self):
self.assertEqual((exception.EntityNotFound,
exception.KeystoneServiceNameConflict,),
client.KeystoneServiceConstraint.expected_exceptions,
"KeystoneServiceConstraint expected exceptions error")
def test_constrain(self):
constrain = client.KeystoneServiceConstraint()
client_mock = mock.MagicMock()
client_plugin_mock = mock.MagicMock()
client_plugin_mock.get_service_id.return_value = self.sample_uuid
client_mock.client_plugin.return_value = client_plugin_mock
self.assertIsNone(constrain.validate_with_client(client_mock,
self.sample_uuid))
client_plugin_mock.get_service_id.assert_called_once_with(
self.sample_uuid
)
class KeystoneUserConstraintTest(common.HeatTestCase):
def test_expected_exceptions(self):
self.assertEqual((exception.EntityNotFound,),
client.KeystoneUserConstraint.expected_exceptions,
"KeystoneUserConstraint expected exceptions error")
def test_constrain(self):
constrain = client.KeystoneUserConstraint()
client_mock = mock.MagicMock()
client_plugin_mock = mock.MagicMock()
client_plugin_mock.get_user_id.return_value = None
client_mock.client_plugin.return_value = client_plugin_mock
self.assertIsNone(constrain.validate_with_client(client_mock, 'admin'))
client_plugin_mock.get_user_id.assert_called_once_with('admin')
class KeystoneClientPluginServiceTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_service'
def _get_mock_service(self):
srv = mock.MagicMock()
srv.id = self.sample_uuid
srv.name = self.sample_name
return srv
def setUp(self):
super(KeystoneClientPluginServiceTest, self).setUp()
self._client = mock.MagicMock()
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_service_id(self, client_keystone):
self._client.client.services.get.return_value = (self
._get_mock_service())
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_service_id(self.sample_uuid))
self._client.client.services.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_service_id_with_name(self, client_keystone):
self._client.client.services.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.services.list.return_value = [
self._get_mock_service()
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_service_id(self.sample_name))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.services.get,
self.sample_name)
self._client.client.services.list.assert_called_once_with(
name=self.sample_name)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_service_id_with_name_conflict(self, client_keystone):
self._client.client.services.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.services.list.return_value = [
self._get_mock_service(),
self._get_mock_service()
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
ex = self.assertRaises(exception.KeystoneServiceNameConflict,
client_plugin.get_service_id,
self.sample_name)
msg = ("Keystone has more than one service with same name "
"%s. Please use service id instead of name" %
self.sample_name)
self.assertEqual(msg, six.text_type(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.services.get,
self.sample_name)
self._client.client.services.list.assert_called_once_with(
name=self.sample_name)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_service_id_not_found(self, client_keystone):
self._client.client.services.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.services.list.return_value = [
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
ex = self.assertRaises(exception.EntityNotFound,
client_plugin.get_service_id,
self.sample_name)
msg = ("The KeystoneService (%(name)s) could not be found." %
{'name': self.sample_name})
self.assertEqual(msg, six.text_type(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.services.get,
self.sample_name)
self._client.client.services.list.assert_called_once_with(
name=self.sample_name)
class KeystoneClientPluginRoleTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_role'
def _get_mock_role(self):
role = mock.MagicMock()
role.id = self.sample_uuid
role.name = self.sample_name
return role
def setUp(self):
super(KeystoneClientPluginRoleTest, self).setUp()
self._client = mock.MagicMock()
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_role_id(self, client_keystone):
self._client.client.roles.get.return_value = (self
._get_mock_role())
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_role_id(self.sample_uuid))
self._client.client.roles.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_role_id_with_name(self, client_keystone):
self._client.client.roles.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.roles.list.return_value = [
self._get_mock_role()
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_role_id(self.sample_name))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.roles.get,
self.sample_name)
self._client.client.roles.list.assert_called_once_with(
name=self.sample_name)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_role_id_not_found(self, client_keystone):
self._client.client.roles.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.roles.list.return_value = [
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
ex = self.assertRaises(exception.EntityNotFound,
client_plugin.get_role_id,
self.sample_name)
msg = ("The KeystoneRole (%(name)s) could not be found." %
{'name': self.sample_name})
self.assertEqual(msg, six.text_type(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.roles.get,
self.sample_name)
self._client.client.roles.list.assert_called_once_with(
name=self.sample_name)
class KeystoneClientPluginProjectTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_project'
def _get_mock_project(self):
project = mock.MagicMock()
project.id = self.sample_uuid
project.name = self.sample_name
return project
def setUp(self):
super(KeystoneClientPluginProjectTest, self).setUp()
self._client = mock.MagicMock()
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_project_id(self, client_keystone):
self._client.client.projects.get.return_value = (self
._get_mock_project())
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_project_id(self.sample_uuid))
self._client.client.projects.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_project_id_with_name(self, client_keystone):
self._client.client.projects.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.projects.list.return_value = [
self._get_mock_project()
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_project_id(self.sample_name))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.projects.get,
self.sample_name)
self._client.client.projects.list.assert_called_once_with(
name=self.sample_name)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_project_id_not_found(self, client_keystone):
self._client.client.projects.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.projects.list.return_value = [
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
ex = self.assertRaises(exception.EntityNotFound,
client_plugin.get_project_id,
self.sample_name)
msg = ("The KeystoneProject (%(name)s) could not be found." %
{'name': self.sample_name})
self.assertEqual(msg, six.text_type(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.projects.get,
self.sample_name)
self._client.client.projects.list.assert_called_once_with(
name=self.sample_name)
class KeystoneClientPluginDomainTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_domain'
def _get_mock_domain(self):
domain = mock.MagicMock()
domain.id = self.sample_uuid
domain.name = self.sample_name
return domain
def setUp(self):
super(KeystoneClientPluginDomainTest, self).setUp()
self._client = mock.MagicMock()
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_domain_id(self, client_keystone):
self._client.client.domains.get.return_value = (self
._get_mock_domain())
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_domain_id(self.sample_uuid))
self._client.client.domains.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_domain_id_with_name(self, client_keystone):
self._client.client.domains.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.domains.list.return_value = [
self._get_mock_domain()
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_domain_id(self.sample_name))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.domains.get,
self.sample_name)
self._client.client.domains.list.assert_called_once_with(
name=self.sample_name)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_domain_id_not_found(self, client_keystone):
self._client.client.domains.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.domains.list.return_value = [
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
ex = self.assertRaises(exception.EntityNotFound,
client_plugin.get_domain_id,
self.sample_name)
msg = ("The KeystoneDomain (%(name)s) could not be found." %
{'name': self.sample_name})
self.assertEqual(msg, six.text_type(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.domains.get,
self.sample_name)
self._client.client.domains.list.assert_called_once_with(
name=self.sample_name)
class KeystoneClientPluginGroupTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_group'
def _get_mock_group(self):
group = mock.MagicMock()
group.id = self.sample_uuid
group.name = self.sample_name
return group
def setUp(self):
super(KeystoneClientPluginGroupTest, self).setUp()
self._client = mock.MagicMock()
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_group_id(self, client_keystone):
self._client.client.groups.get.return_value = (self
._get_mock_group())
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_group_id(self.sample_uuid))
self._client.client.groups.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_group_id_with_name(self, client_keystone):
self._client.client.groups.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.groups.list.return_value = [
self._get_mock_group()
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_group_id(self.sample_name))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.groups.get,
self.sample_name)
self._client.client.groups.list.assert_called_once_with(
name=self.sample_name)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_group_id_not_found(self, client_keystone):
self._client.client.groups.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.groups.list.return_value = [
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
ex = self.assertRaises(exception.EntityNotFound,
client_plugin.get_group_id,
self.sample_name)
msg = ("The KeystoneGroup (%(name)s) could not be found." %
{'name': self.sample_name})
self.assertEqual(msg, six.text_type(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.groups.get,
self.sample_name)
self._client.client.groups.list.assert_called_once_with(
name=self.sample_name)
class KeystoneClientPluginUserTest(common.HeatTestCase):
sample_uuid = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
sample_name = 'sample_user'
def _get_mock_user(self):
user = mock.MagicMock()
user.id = self.sample_uuid
user.name = self.sample_name
return user
def setUp(self):
super(KeystoneClientPluginUserTest, self).setUp()
self._client = mock.MagicMock()
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_user_id(self, client_keystone):
self._client.client.users.get.return_value = self._get_mock_user()
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_user_id(self.sample_uuid))
self._client.client.users.get.assert_called_once_with(
self.sample_uuid)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_user_id_with_name(self, client_keystone):
self._client.client.users.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.users.list.return_value = [
self._get_mock_user()
]
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
self.assertEqual(self.sample_uuid,
client_plugin.get_user_id(self.sample_name))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.users.get,
self.sample_name)
self._client.client.users.list.assert_called_once_with(
name=self.sample_name)
@mock.patch.object(client.KeystoneClientPlugin, 'client')
def test_get_user_id_not_found(self, client_keystone):
self._client.client.users.get.side_effect = (keystone_exceptions
.NotFound)
self._client.client.users.list.return_value = []
client_keystone.return_value = self._client
client_plugin = client.KeystoneClientPlugin(
context=mock.MagicMock()
)
ex = self.assertRaises(exception.EntityNotFound,
client_plugin.get_user_id,
self.sample_name)
msg = ('The KeystoneUser (%(name)s) could not be found.' %
{'name': self.sample_name})
self.assertEqual(msg, six.text_type(ex))
self.assertRaises(keystone_exceptions.NotFound,
self._client.client.users.get,
self.sample_name)
self._client.client.users.list.assert_called_once_with(
name=self.sample_name)
| apache-2.0 |
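# The tests above exercise a common lookup pattern in the Heat Keystone client
# plugin: try a direct get() by id first, then fall back to list(name=...) when
# that raises NotFound. A simplified, hypothetical sketch of that pattern (not
# the actual Heat implementation; the error types here are generic stand-ins):
from keystoneclient import exceptions as keystone_exceptions

def lookup_service_id(services, name_or_id):
    try:
        # works when the caller passed a UUID
        return services.get(name_or_id).id
    except keystone_exceptions.NotFound:
        pass
    # otherwise treat the argument as a name and search for it
    matches = services.list(name=name_or_id)
    if not matches:
        raise LookupError("service %s not found" % name_or_id)
    if len(matches) > 1:
        raise ValueError("more than one service named %s; use the id" % name_or_id)
    return matches[0].id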
nicupavel/opp | src/opp.py | 1 | 9757 | #!/usr/bin/python
import openpanzer as op
from oppSql import *
from oppRdf import *
import util
from dbpedia import DbpediaQuery
from websearch import GoogleQuery, BingQuery
from urllib import quote, unquote
from pprint import pprint
import time,os, sys, errno
reload(sys)
sys.setdefaultencoding("utf-8")
OFFLINE_JSON_DIR = "../oppedia-offline"
if __name__ == "__main__":
eq = op.Equipment()
#eq.loadAllCountries()
eq.loadCountry(8) # Germany
print "Loaded %d units" % len(eq.eq)
# Finds a RDF resource URL based on a search string and a search provider
def searchRdfResource(searchString, provider=PROVIDER_DBPEDIA):
qdbpedia = DbpediaQuery()
qsearch = qdbpedia
if provider == PROVIDER_GOOGLE or provider == PROVIDER_GOOGLE_SPECIFIC:
qsearch = GoogleQuery()
if provider == PROVIDER_BING:
qsearch = BingQuery()
print "Searching %s with provider %s" % (searchString, provider)
r = qsearch.queryText(searchString)
if len(r) == 0:
print "Query %s with provider %s returned no results" % (searchString, provider)
return None
if provider == PROVIDER_DBPEDIA:
rdfResource = unquote(r[0])
else:
rdfResource = util.wikiToDBpedia(r[0])
# Web search returned resource might be a DBpedia redirect
if provider != PROVIDER_DBPEDIA:
tmp = qdbpedia.getRealUri(rdfResource) # resolve dbpedia redirect
if tmp is not None:
rdfResource = tmp
print "Resolved resource redirect to %s" % rdfResource
if rdfResource == "":
print "Empty resource returned"
return None
rdfData = qdbpedia.getFromResource(rdfResource)
if len(rdfData) == 0:
print "No DBpedia data for %s resource" % rdfResource
return None
label = rdfData[0]["label"]["value"]
print "Provider %s found label %s for resource %s" % (provider, label, rdfResource)
return { "label": label, "resource": rdfResource }
@db_session
def createSqlResourceSearch(unitId, searchString, rdfdb, provider=PROVIDER_DBPEDIA):
s = None
result = searchRdfResource(searchString, provider)
if result is not None:
url = result["resource"]
if rdfdb.load(url):
s = ResourceSearch(unitId = unitId, provider = provider, searchString = searchString, foundResource = url)
commit()
if s is None:
return None
else:
print "Cannot save RDF resource %s to DB" % url
return None
return { "sqlResource": s, "searchResult": result }
return None
@db_session
def createSqlUnitWithSpecifiedResource(unit, resource, rdfdb):
print "* Creating Unit %s (%d) with resource %s" % (unit.getFullName(), unit.id, resource)
if rdfdb.load(resource):
s = ResourceSearch(unitId = unit.id, provider = PROVIDER_CUSTOM, searchString = unit.getFullName(), foundResource = resource)
else:
return False
try:
u = OPPedia(id = unit.id, name = unit.name, country = unit.country, unitClass = unit.unitClass,
usedResourceSearch = s,
rdfStoredLabel = "",
rdfStoredResource = resource)
commit()
except:
print "Cannot save unit to SQL DB"
return False
return True
@db_session
def createSqlUnit(unit, rdfdb):
print "* Creating Unit %s (%d)" % (unit.getFullName(), unit.id)
dbpediaSearch = util.unitNameToRegex(unit.getNicerName())
webSearch = unit.getNicerName() + " " + unit.getClassName()
webFound = False
dbpediaResult = createSqlResourceSearch(unit.id, dbpediaSearch, rdfdb, provider=PROVIDER_DBPEDIA)
try:
webResult = createSqlResourceSearch(unit.id, webSearch, rdfdb, provider=PROVIDER_GOOGLE)
webFound = webResult is not None
except:
print "Google error ! Trying Bing"
if not webFound:
try:
webResult = createSqlResourceSearch(unit.id, webSearch, rdfdb, provider=PROVIDER_BING)
webFound = webResult is not None
except:
print "Bing error ! No Web search results. Aborting unit creation"
return False
if not webFound:
print "No Web search results. Aborting unit creation"
return False
chosenResource = None
chosenResult = None
if dbpediaResult is not None:
chosenResult = dbpediaResult["searchResult"]
chosenResource = dbpediaResult["sqlResource"]
# Prefer google result
if webResult is not None:
chosenResult = webResult["searchResult"]
chosenResource = webResult["sqlResource"]
if chosenResource is None:
print "No resource saved to DB. Aborting unit creation"
return False
try:
u = OPPedia(id = unit.id, name = unit.name, country = unit.country, unitClass = unit.unitClass,
usedResourceSearch=chosenResource,
rdfStoredLabel = chosenResult["label"],
rdfStoredResource = chosenResult["resource"])
commit()
except:
print "Cannot save unit to SQL DB"
return False
return True
@db_session
def updateUnit(id, rdfdb, eqlist = None):
if eqlist != None:
unit = eqlist.getUnit(id)
else:
unit = eq.getUnit(id)
if unit is None:
print "Unit %d not found in game db" % id
return False
sqlUnit = OPPedia[id]
if sqlUnit is None:
return createSqlUnit(unit, rdfdb)
print "* Updating Unit %s (%d)" % (unit.getFullName(), unit.id)
#print "forceRefresh=", sqlUnit.forceRefresh
sqlRes = sqlUnit.usedResourceSearch
foundRes = None
if sqlRes is not None:
foundRes = sqlUnit.usedResourceSearch.foundResource
if sqlUnit.forceRefresh:
# This means that user set a custom resource URL to be loaded
if sqlUnit.rdfStoredResource is not None and sqlUnit.rdfStoredResource != foundRes:
print "Unit %s (%d) forced refresh" % (unit.getFullName(), id)
if rdfdb.load(sqlUnit.rdfStoredResource):
s = ResourceSearch(unitId = unit.id, provider = PROVIDER_CUSTOM, searchString = unit.getNicerName(), foundResource = sqlUnit.rdfStoredResource)
sqlUnit.usedResourceSearch = s
sqlUnit.forceRefresh = False
commit()
else:
print "Cannot refresh PROVIDER_CUSTOM resource %s" % sqlUnit.rdfStoredResource
return False
# No found resource retry search and update unit if possible
if sqlUnit.forceRefresh or (foundRes is None and sqlRes is not None):
if sqlUnit.forceRefresh:
# Delete old searches
searches = select( s for s in ResourceSearch if s.unitId == id)
print "Unit %s (%d) doing forced refresh of search results" % (unit.getFullName(), id)
for s in searches:
s.delete()
else:
print "Unit %s (%d) has a resource without search results, refreshing" % (unit.getFullName(), id)
result = createSqlResourceSearch(id, sqlRes.searchString, rdfdb, sqlRes.provider)
if result is not None:
sqlUnit.rdfStoredResource = result["searchResult"]["resource"]
sqlUnit.rdfStoredLabel = result["searchResult"]["label"]
sqlUnit.usedResourceSearch = result["sqlResource"]
sqlUnit.forceRefresh = False
commit()
else:
print "Cannot refresh unit search"
return False
# TODO the case when unit has no google search results (to retry google)
# Has a resource but does it have RDF data ?
if foundRes is not None:
if not rdfdb.hasResource(foundRes):
print "Unit %s (%d) has a resource without rdf data, refreshing" % (unit.getFullName(), id)
if not rdfdb.load(foundRes):
return False
return True
@db_session
def generateOfflineJSON(id, rdfdb, lang="en"):
u = OPPedia[id]
if u is None:
print "Unit %d not found in DB" % id
return False
rdfResource = u.usedResourceSearch.foundResource
if rdfResource is None:
print "Resource for unit %d not found in RDF DB" % id
return False
path = os.path.join(OFFLINE_JSON_DIR, str(u.country), str(u.unitClass))
try:
os.makedirs(path)
except os.error, e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
print "Cannot create complete path %s" % path
return False
rdfData = rdfdb.getUnitDataFromResource(rdfResource, lang)
jsonFileName = os.path.join(path, str(u.id) + ".json")
print "Exporting to %s " % jsonFileName
# Fix export errors: json.dump cannot serialize datetime objects by default
def customHandler(o):
return o.isoformat() if hasattr(o, 'isoformat') else o
try:
with open(jsonFileName, "w") as jsonFile:
json.dump(rdfData, jsonFile, sort_keys=True, ensure_ascii=True, indent=4, default=customHandler)
except Exception, e:
print "Cannot generate json %s" % str(e)
return False
return True
@db_session
def offlineExportAll(rdfdb, lang="en"):
ids = select(u.id for u in OPPedia)[:]
for id in ids:
generateOfflineJSON(id, rdfdb, lang)
if __name__ == "__main__":
rdfdb = OppRdf()
rdfdb.init()
for id in eq.eq:
if updateUnit(id, rdfdb):
time.sleep(1)
#generateOfflineJSON(79, rdfdb)
#offlineExportAll(rdfdb)
#unit = eq.getUnit(111)
#createSqlUnitWithSpecifiedResource(unit, "http://dbpedia.org/resource/Pioneer_(military)", rdfdb)
rdfdb.close()
| gpl-2.0 |
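# A condensed, illustrative sketch of the lookup flow used by the script above
# (Python 2; unit id 111 is an arbitrary example value): resolve one unit's
# name to a DBpedia resource and print what was found.
unit = eq.getUnit(111)
result = searchRdfResource(unit.getNicerName(), provider=PROVIDER_DBPEDIA)
if result is not None:
    print "%s -> %s" % (result["label"], result["resource"])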
nitin-cherian/Webapps | SimpleIsBetterThanComplex.com/myproject/.env/lib/python3.5/site-packages/django/contrib/gis/geos/prototypes/io.py | 41 | 11415 | import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBReader_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self.destructor.func = self.destructor.get_func(
*self.destructor.args, **self.destructor.kwargs
)
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
ptr_type = WKT_READ_PTR
destructor = wkt_reader_destroy
def read(self, wkt):
if not isinstance(wkt, (bytes, six.string_types)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
ptr_type = WKB_READ_PTR
destructor = wkb_reader_destroy
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, six.memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, six.string_types)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
ptr_type = WKT_WRITE_PTR
destructor = wkt_writer_destroy
_trim = False
_precision = None
def __init__(self, dim=2, trim=False, precision=None):
super(WKTWriter, self).__init__()
if bool(trim) != self._trim:
self.trim = trim
if precision is not None:
self.precision = precision
self.outdim = dim
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
if bool(flag) != self._trim:
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if (not isinstance(precision, int) or precision < 0) and precision is not None:
raise AttributeError('WKT output rounding precision must be non-negative integer or None.')
if precision != self._precision:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
ptr_type = WKB_WRITE_PTR
destructor = wkb_writer_destroy
def __init__(self, dim=2):
super(WKBWriter, self).__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
else:
raise ValueError('Empty point is not representable in WKB.')
return geom
def write(self, geom):
"Returns the WKB representation of the given geometry."
from django.contrib.gis.geos import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
# Fix GEOS output for empty polygon.
# See https://trac.osgeo.org/geos/ticket/680.
wkb = wkb[:-8] + b'\0' * 4
return six.memoryview(wkb)
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
from django.contrib.gis.geos.polygon import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
wkb = wkb[:-16] + b'0' * 8
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
@srid.setter
def srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
else:
thread_context.wkt_w.outdim = dim
thread_context.wkt_w.trim = trim
thread_context.wkt_w.precision = precision
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter(dim=dim)
else:
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter(dim=dim)
thread_context.ewkb_w.srid = True
else:
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
| mit |
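# A small sketch of how the thread-local accessors above are used (illustrative
# only; exact return types can vary by Django/GEOS version). wkt_r().read()
# returns a raw GEOS geometry pointer, while the public GEOSGeometry API wraps
# the same reader/writer machinery:
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.geos.prototypes.io import wkt_r, wkt_w

ptr = wkt_r().read(b'POINT (1 2)')        # raw GEOM_PTR for this thread
geom = GEOSGeometry('POINT (1 2)')        # public API built on the same readers
print(wkt_w(trim=True).write(geom))       # WKT bytes, e.g. b'POINT (1 2)'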
UpYou/relay | my_gnuradio/gr/qa_pll_carriertracking.py | 6 | 7835 | #!/usr/bin/env python
#
# Copyright 2004,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
class test_sig_source (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test_pll_carriertracking (self):
expected_result = ((1.00000238419+6.47922693275e-09j),
(0.998399555683+0.0565364062786j),
(0.994261980057+0.10695001483j),
(0.98843306303+0.151648163795j),
(0.981579363346+0.191063538194j),
(0.974212288857+0.225630432367j),
(0.966734290123+0.255773901939j),
(0.959442555904+0.281897842884j),
(0.952551782131+0.304379671812j),
(0.946205317974+0.323566257954j),
(0.940503358841+0.339778244495j),
(0.935505151749+0.353307723999j),
(0.931235432625+0.364419162273j),
(0.927616357803+0.373535633087j),
(0.924710214138+0.380666583776j),
(0.922494113445+0.386005342007j),
(0.92093116045+0.389725029469j),
(0.919974088669+0.391981720924j),
(0.919572234154+0.392916500568j),
(0.919680893421+0.392660915852j),
(0.920248389244+0.39133310318j),
(0.921222627163+0.389039844275j),
(0.922548472881+0.385877460241j),
(0.924184799194+0.381939411163j),
(0.926086127758+0.377309292555j),
(0.928135097027+0.37224984169j),
(0.930293083191+0.366814315319j),
(0.932614028454+0.360868781805j),
(0.935064375401+0.354473829269j),
(0.937613248825+0.347684770823j),
(0.940225422382+0.340550601482j),
(0.942881464958+0.33312189579j),
(0.945559620857+0.325443327427j),
(0.948240220547+0.31755694747j),
(0.950899422169+0.309499144554j),
(0.953524827957+0.301307469606j),
(0.956105649471+0.293015599251j),
(0.958630502224+0.284654557705j),
(0.96103054285+0.276443749666j),
(0.963361799717+0.26819768548j),
(0.965623259544+0.259936869144j),
(0.967810571194+0.251679092646j),
(0.969916880131+0.243440493941j),
(0.971936583519+0.235235646367j),
(0.97387367487+0.227080151439j),
(0.975726902485+0.218987599015j),
(0.977494239807+0.210969462991j),
(0.979169845581+0.203035995364j),
(0.980761289597+0.195199295878j),
(0.982269346714+0.187469303608j),
(0.983659446239+0.180052131414j),
(0.984931468964+0.1729388237j),
(0.986136198044+0.165923252702j),
(0.987275123596+0.159012272954j),
(0.988349795341+0.15221118927j),
(0.989354014397+0.145524248481j),
(0.990296065807+0.138957872987j),
(0.991178870201+0.132516458631j),
(0.992005050182+0.126204773784j),
(0.992770493031+0.120025672019j),
(0.993480443954+0.113984130323j),
(0.994139909744+0.108083210886j),
(0.994751393795+0.102326385677j),
(0.995293080807+0.0969148278236j),
(0.995791256428+0.091630294919j),
(0.996252119541+0.0864710733294j),
(0.996678769588+0.0814334899187j),
(0.997069239616+0.0765165910125j),
(0.997423350811+0.071716658771j),
(0.997748315334+0.0670333206654j),
(0.998046517372+0.0624645166099j),
(0.998317599297+0.058009263128j),
(0.998557567596+0.053665690124j),
(0.998775064945+0.0494344644248j),
(0.998971700668+0.0453144386411j),
(0.999140620232+0.0415064357221j),
(0.99927687645+0.0379924885929j),
(0.999400436878+0.0345549099147j),
(0.999511957169+0.0311931278557j),
(0.99961233139+0.0279070306569j),
(0.999694347382+0.0246965941042j),
(0.999765276909+0.0215622838587j),
(0.999826848507+0.0185046810657j),
(0.999880313873+0.0155246723443j),
(0.999920129776+0.0126227736473j),
(0.999949812889+0.00980060640723j),
(0.99997317791+0.00705910893157j),
(0.999990820885+0.00439921114594j),
(0.999998450279+0.00202245195396j),
(0.999998092651-0.00029227725463j),
(0.999994516373-0.00254815118387j),
(0.999988794327-0.00474932929501j),
(0.999977111816-0.00689708162099j),
(0.999957799911-0.00899503659457j),
(0.999936699867-0.0110441967845j),
(0.999914228916-0.0130464555696j),
(0.999889075756-0.0150024276227j),
(0.999855577946-0.0169130507857j),
(0.999821305275-0.0187777336687j),
(0.999786794186-0.0205969288945j))
sampling_freq = 10e3
freq = sampling_freq / 100
alpha = 0.1
beta = alpha * alpha / 4.0
maxf = 1
minf = -1
src = gr.sig_source_c (sampling_freq, gr.GR_COS_WAVE, freq, 1.0)
pll = gr.pll_carriertracking_cc(alpha, beta, maxf, minf)
head = gr.head (gr.sizeof_gr_complex, int (freq))
dst = gr.vector_sink_c ()
self.tb.connect (src, pll, head)
self.tb.connect (head, dst)
self.tb.run ()
dst_data = dst.data ()
self.assertComplexTuplesAlmostEqual (expected_result, dst_data, 5)
if __name__ == '__main__':
gr_unittest.main ()
| gpl-3.0 |
CodeDJ/qt5-hidpi | qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py | 123 | 7521 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import logging
import re
from webkitpy.layout_tests.models import test_expectations
_log = logging.getLogger(__name__)
class LayoutTestFinder(object):
def __init__(self, port, options):
self._port = port
self._options = options
self._filesystem = self._port.host.filesystem
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
def find_tests(self, options, args):
paths = self._strip_test_dir_prefixes(args)
if options.test_list:
paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
test_files = self._port.tests(paths)
return (paths, test_files)
def _strip_test_dir_prefixes(self, paths):
return [self._strip_test_dir_prefix(path) for path in paths if path]
def _strip_test_dir_prefix(self, path):
# Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
# the filesystem uses '\\' as a directory separator.
if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
return path
def _read_test_names_from_file(self, filenames, test_path_separator):
fs = self._filesystem
tests = []
for filename in filenames:
try:
if test_path_separator != fs.sep:
filename = filename.replace(test_path_separator, fs.sep)
file_contents = fs.read_text_file(filename).split('\n')
for line in file_contents:
line = self._strip_comments(line)
if line:
tests.append(line)
except IOError, e:
if e.errno == errno.ENOENT:
_log.critical('')
_log.critical('--test-list file "%s" not found' % filename)
raise
return tests
@staticmethod
def _strip_comments(line):
commentIndex = line.find('//')
if commentIndex == -1:
commentIndex = len(line)
line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
if line == '':
return None
else:
return line
def skip_tests(self, paths, all_tests_list, expectations, http_tests):
all_tests = set(all_tests_list)
tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
if self._options.skip_failing_tests:
tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
if self._options.skipped == 'only':
tests_to_skip = all_tests - tests_to_skip
elif self._options.skipped == 'ignore':
tests_to_skip = set()
elif self._options.skipped != 'always':
# make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
tests_to_skip -= set(paths)
# unless of course we don't want to run the HTTP tests :)
if not self._options.http:
tests_to_skip.update(set(http_tests))
return tests_to_skip
def split_into_chunks(self, test_names):
"""split into a list to run and a set to skip, based on --run-chunk and --run-part."""
if not self._options.run_chunk and not self._options.run_part:
return test_names, set()
# If the user specifies they just want to run a subset of the tests,
# just grab a subset of the non-skipped tests.
chunk_value = self._options.run_chunk or self._options.run_part
try:
(chunk_num, chunk_len) = chunk_value.split(":")
chunk_num = int(chunk_num)
assert(chunk_num >= 0)
test_size = int(chunk_len)
assert(test_size > 0)
except AssertionError:
_log.critical("invalid chunk '%s'" % chunk_value)
return (None, None)
# Get the number of tests
num_tests = len(test_names)
# Get the start offset of the slice.
if self._options.run_chunk:
chunk_len = test_size
# In this case chunk_num can be really large. We need
# to make the slave fit in the current number of tests.
slice_start = (chunk_num * chunk_len) % num_tests
else:
# Validate the data.
assert(test_size <= num_tests)
assert(chunk_num <= test_size)
# To count the chunk_len, and make sure we don't skip
# some tests, we round to the next value that fits exactly
# all the parts.
rounded_tests = num_tests
if rounded_tests % test_size != 0:
rounded_tests = (num_tests + test_size - (num_tests % test_size))
chunk_len = rounded_tests / test_size
slice_start = chunk_len * (chunk_num - 1)
# It does not mind if we go over test_size.
# Get the end offset of the slice.
slice_end = min(num_tests, slice_start + chunk_len)
tests_to_run = test_names[slice_start:slice_end]
_log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
# If we reached the end and we don't have enough tests, we run some
# from the beginning.
if slice_end - slice_start < chunk_len:
extra = chunk_len - (slice_end - slice_start)
_log.debug(' last chunk is partial, appending [0:%d]' % extra)
tests_to_run.extend(test_names[0:extra])
return (tests_to_run, set(test_names) - set(tests_to_run))
| lgpl-2.1 |
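# A worked example of the --run-chunk arithmetic in split_into_chunks above
# (hypothetical numbers): chunk value "2:10" with 25 tests.
num_tests = 25
chunk_num, chunk_len = 2, 10
slice_start = (chunk_num * chunk_len) % num_tests     # (2 * 10) % 25 = 20
slice_end = min(num_tests, slice_start + chunk_len)   # min(25, 30) = 25
# The slice [20:25] holds only 5 tests, 5 short of chunk_len, so 5 more are
# appended from the front: tests_to_run = test_names[20:25] + test_names[0:5]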
rghe/ansible | lib/ansible/modules/cloud/azure/azure_rm_image_facts.py | 18 | 9598 | #!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_image_facts
version_added: "2.8"
short_description: Get facts about azure custom images.
description:
- List azure custom images. The images can be listed where scope of listing can be based on subscription, resource group, name or tags.
options:
resource_group:
description:
- Name of resource group.
name:
description:
- Name of the image to filter from existing images.
tags:
description:
- List of tags to be matched.
extends_documentation_fragment:
- azure
author:
- "Madhura Naniwadekar(@Madhura-CSI)"
'''
EXAMPLES = '''
- name: List images with name
azure_rm_image_facts:
name: test-image
resource_group: test-resource-group
- name: List images by resource group
azure_rm_image_facts:
resource_group: test-resource-group
tags:
- testing
- foo:bar
- name: List all available images under current subscription
azure_rm_image_facts:
'''
RETURN = '''
images:
description: List of image dicts.
returned: always
type: complex
contains:
id:
description:
- Id of the image.
returned: always
type: str
sample: /subscriptions/xxxx/resourceGroups/xxx/providers/Microsoft.Compute/images/xx
name:
description:
- Name of the image.
returned: always
type: str
resource_group:
description:
- Resource group of the image.
returned: always
type: str
location:
description:
- Location of the image.
returned: always
type: str
os_disk:
description:
- Id of os disk for image.
type: str
sample: /subscriptions/xxxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xx
os_disk_caching:
description:
- Specifies caching requirements for the image.
returned: always
type: str
os_state:
description:
- Specifies image operating system state. Possible values are 'Generalized' or 'Specialized'.
returned: always
type: str
sample: Generalized
os_storage_account_type:
description:
- Specifies the storage account type for the managed disk.
type: str
sample: Standard_LRS
os_type:
description:
- Type of OS for image.
returned: always
type: str
sample: Linux
provisioning_state:
description:
- State of image.
returned: always
type: str
sample: Succeeded
source:
description:
- Resource id of source VM from which the image is created
type: str
sample: /subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/virtualMachines/xx
tags:
description:
- Dictionary of tags associated with the image.
type: complex
data_disks:
description:
- List of data disks associated with the image.
type: complex
returned: always
contains:
caching:
description:
- Type of caching of data disk.
sample: read_only
disk_size_gb:
description:
- Specifies the size of empty data disks in gigabytes.
returned: always
type: int
sample: 50
lun:
description:
- Specifies the logical unit number of the data disk.
returned: always
type: int
sample: 0
storage_account_type:
description:
- Specifies the storage account type for the managed disk data disk.
type: str
sample: Standard_LRS
managed_disk_id:
description:
- Id of managed disk.
type: str
sample: /subscriptions/xxxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xx
blob_uri:
description:
- The virtual hard disk.
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMImageFacts(AzureRMModuleBase):
def __init__(self, **kwargs):
self.module_arg_spec = dict(
resource_group=dict(type='str'),
name=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False
)
self.resource_group = None
self.name = None
self.format = None
self.tags = None
super(AzureRMImageFacts, self).__init__(
derived_arg_spec=self.module_arg_spec,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name and self.resource_group:
self.results['images'] = self.get_image(self.resource_group, self.name)
elif self.name and not self.resource_group:
self.results['images'] = self.list_images(self.name)
elif not self.name and self.resource_group:
self.results['images'] = self.list_images_by_resource_group(self.resource_group)
elif not self.name and not self.resource_group:
self.results['images'] = self.list_images()
return self.results
def get_image(self, resource_group, image_name):
'''
Returns image details based on its name
'''
self.log('Get properties for {0}'.format(self.name))
result = []
item = None
try:
item = self.compute_client.images.get(resource_group, image_name)
except CloudError as exc:
self.fail('Failed to list images - {0}'.format(str(exc)))
result = [self.format_item(item)]
return result
def list_images_by_resource_group(self, resource_group):
'''
Returns image details based on its resource group
'''
self.log('List images filtered by resource group')
response = None
try:
response = self.compute_client.images.list_by_resource_group(resource_group)
except CloudError as exc:
self.fail("Failed to list images: {0}".format(str(exc)))
return [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else []
def list_images(self, image_name=None):
'''
Returns image details in current subscription
'''
self.log('List images within current subscription')
response = None
results = []
try:
response = self.compute_client.images.list()
except CloudError as exc:
self.fail("Failed to list all images: {0}".format(str(exc)))
results = [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else []
if image_name:
results = [result for result in results if result['name'] == image_name]
return results
def format_item(self, item):
d = item.as_dict()
for data_disk in d['storage_profile']['data_disks']:
if 'managed_disk' in data_disk.keys():
data_disk['managed_disk_id'] = data_disk['managed_disk']['id']
data_disk.pop('managed_disk', None)
d = {
'id': d['id'],
'resource_group': d['id'].split('/')[4],
'name': d['name'],
'location': d['location'],
'tags': d.get('tags'),
'source': d['source_virtual_machine']['id'] if 'source_virtual_machine' in d.keys() else None,
'os_type': d['storage_profile']['os_disk']['os_type'],
'os_state': d['storage_profile']['os_disk']['os_state'],
'os_disk_caching': d['storage_profile']['os_disk']['caching'],
'os_storage_account_type': d['storage_profile']['os_disk']['storage_account_type'],
'os_disk': d['storage_profile']['os_disk']['managed_disk']['id'] if 'managed_disk' in d['storage_profile']['os_disk'].keys() else None,
'os_blob_uri': d['storage_profile']['os_disk']['blob_uri'] if 'blob_uri' in d['storage_profile']['os_disk'].keys() else None,
'provisioning_state': d['provisioning_state'],
'data_disks': d['storage_profile']['data_disks']
}
return d
def main():
AzureRMImageFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
DTUWindEnergy/Python4WindEnergy | lesson 3/results/ebra.py | 1 | 8402 | # -*- coding: utf-8 -*- <nbformat>3.0</nbformat>
# <headingcell level=1>
# Plotting with Matplotlib
# <headingcell level=2>
# Prepare for action
# <codecell>
import numpy as np
import scipy as sp
import sympy
# Pylab combines the pyplot functionality (for plotting) with the numpy
# functionality (for mathematics and for working with arrays) in a single namespace
# aims to provide a closer MATLAB feel (the easy way). Note that his approach
# should only be used when doing some interactive quick and dirty data inspection.
# DO NOT USE THIS FOR SCRIPTS
#from pylab import *
# the convienient Matplotib plotting interface pyplot (the tidy/right way)
# use this for building scripts. The examples here will all use pyplot.
import matplotlib.pyplot as plt
# for using the matplotlib API directly (the hard and verbose way)
# use this when building applications, and/or backends
import matplotlib as mpl
# <markdowncell>
# How would you like the IPython notebook to show your plots? In order to use the
# matplotlib IPython magic, your IPython notebook should be launched as
#
# ipython notebook --matplotlib=inline
#
# Make plots appear as a pop-up window, choose the backend: 'gtk', 'inline', 'osx', 'qt', 'qt4', 'tk', 'wx'
#
# %matplotlib qt
#
# or inline in the notebook (no panning or zooming through the plot). Not working in IPython 0.x
#
# %matplotlib inline
#
# <codecell>
# activate pop up plots
#%matplotlib qt
# or change to inline plots
# %matplotlib inline
# <headingcell level=3>
# Matplotlib documentation
# <markdowncell>
# Finding your own way (aka RTFM). Hint: there is a search box available!
#
# * http://matplotlib.org/contents.html
#
# The Matplotlib API docs:
#
# * http://matplotlib.org/api/index.html
#
# Pyplot, object oriented plotting:
#
# * http://matplotlib.org/api/pyplot_api.html
# * http://matplotlib.org/api/pyplot_summary.html
#
# Extensive gallery with examples:
#
# * http://matplotlib.org/gallery.html
# <headingcell level=3>
# Tutorials for those who want to start playing
# <markdowncell>
# If reading manuals is too much for you, there is a very good tutorial available here:
#
# * http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-4-Matplotlib.ipynb
#
# Note that this tutorial uses
#
# from pylab import *
#
# which is usually not advised in more advanced script environments. When using
#
# import matplotlib.pyplot as plt
#
# you need to prefix all plotting commands as used in the above tutorial with
#
# plt.
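# <codecell>
# A minimal illustrative sketch (not part of the original tutorial): the same tiny
# plot written with the explicit ``plt.`` prefix recommended above.
xs = np.linspace(0, 2*np.pi, 100)
plt.plot(xs, np.sin(xs), label='sin(x)')
plt.xlabel('x')
plt.legend(loc='best')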
# <markdowncell>
# Give me more!
#
# [EuroScipy 2012 Matplotlib tutorial](http://www.loria.fr/~rougier/teaching/matplotlib/). Note that here the author uses ```from pylab import * ```. When using ```import matplotlib.pyplot as plt``` the plotting commands need to be prefixed with ```plt.```
# <headingcell level=2>
# Plotting template starting point
# <codecell>
# some sample data
x = np.arange(-10,10,0.1)
# <markdowncell>
# To change the default plot configuration values.
# <codecell>
page_width_cm = 13
dpi = 200
inch = 2.54 # inch in cm
# setting global plot configuration using the RC configuration style
plt.rc('font', family='serif')
plt.rc('xtick', labelsize=12) # tick labels
plt.rc('ytick', labelsize=20) # tick labels
plt.rc('axes', labelsize=20) # axes labels
# If you don’t need LaTeX, don’t use it. It is slower to plot, and text
# looks just fine without. If you need it, e.g. for symbols, then use it.
#plt.rc('text', usetex=True) #<- P-E: Doesn't work on my Mac
# <codecell>
# create a figure instance, note that figure size is given in inches!
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,6))
# set the big title (note aligment relative to figure)
fig.suptitle("suptitle 16, figure alignment", fontsize=16)
# actual plotting
ax.plot(x, x**2, label="label 12")
# set axes title (note aligment relative to axes)
ax.set_title("title 14, axes alignment", fontsize=14)
# axes labels
ax.set_xlabel('xlabel 12')
ax.set_ylabel(r'$y_{\alpha}$ 12', fontsize=8)
# legend
ax.legend(fontsize=12, loc="best")
# saving the figure in different formats
# fig.savefig('figure-%03i.png' % dpi, dpi=dpi)
# fig.savefig('figure.svg')
# fig.savefig('figure.eps')
# <codecell>
# following steps are only relevant when using figures as pop up windows (with %matplotlib qt)
# to update a figure with has been modified
fig.canvas.draw()
# show a figure
fig.show()
# <headingcell level=2>
# Exercise
# <markdowncell>
# In this section you will figure out how to use several plotting features yourself. Use the previously mentioned resources to find out how. In many cases, google is your friend!
# <markdowncell>
# * add a grid to the plot
# <codecell>
plt.plot(x,x**2)
plt.grid('on')
# <markdowncell>
# * change the location of the legend to different places
# <codecell>
plt.plot(x,x**2, label="label 12")
plt.legend(fontsize=12, loc="upper right")
# <markdowncell>
# * find a way to control the line type and color, marker type and color, control the frequency of the marks (`markevery`). See plot options at: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
# <codecell>
stride = max( int(len(x) / 20), 1)
plt.plot(x,x**2, 'ko-',color='forestgreen', markevery=stride,label="label 12")
plt.legend(fontsize=12, loc="upper center")
# <markdowncell>
# * add different sub-plots
# <codecell>
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
# <markdowncell>
# * size the figure such that when included on an A4 page the fonts are given in their true size
# <codecell>
# matplotlib.rcParams.update({'font.size': 22})
fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True)
axes[0].plot(x,x**2)
axes[1].plot(x,-x**2)
fig.set_size_inches(8.2,3) # using A4 width in inches?
fig.set_dpi(100)
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(12)
# ax[0].set('xtick', labelsize=12) # tick labels
# .rc('ytick', labelsize=20) # tick labels
# .rc('axes', labelsize=20) # axes labels
# fig.savefig('figure.pdf')
# <markdowncell>
# * make a contour plot
# <codecell>
X, Y = np.meshgrid(x,x)
plt.figure()
plt.contourf(X,Y,X*Y,linewidth=0.3,cmap=plt.get_cmap('hsv'),levels=np.arange(-1,1,0.1))
plt.show()
# im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd/30),cmap=plt.get_cmap('hsv'),linewidth=0.1)
# <markdowncell>
# * use twinx() to create a second axis on the right for the second plot
# <codecell>
plt.figure()
ax=plt.gca()
ax.plot(x,x**2)
ax2 = ax.twinx()
ax2.plot(x,x**4, 'r')
# <markdowncell>
# * add horizontal and vertical lines using axvline(), axhline()
# <codecell>
plt.figure()
plt.plot(x,x**2)
plt.axvline(2)
plt.axhline(10)
# <markdowncell>
# * autoformat dates for nice printing on the x-axis using fig.autofmt_xdate()
# <codecell>
import datetime
dates = np.array([datetime.datetime.now() + datetime.timedelta(days=i) for i in xrange(24)])
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(dates,xrange(24))
fig.autofmt_xdate()
# <headingcell level=2>
# Advanced exercises
# <markdowncell>
# We are going to play a bit with regression
# <markdowncell>
# * Create a vector x of equally spaced number between $x \in [0, 5\pi]$ of 1000 points (keyword: linspace)
# <codecell>
n=1000
x=np.linspace(0,5*np.pi,n)
# <markdowncell>
# * create a vector y, so that y=sin(x) with some random noise
# <codecell>
y = np.sin(x) +np.random.rand(n)-0.5
yth = np.sin(x)
# <markdowncell>
# * plot it like this: 
# <codecell>
fig=plt.figure()
ax=plt.gca()
ax.plot(x,y,'b.')
ax.plot(x,yth,'k--',label=r'$y=sin(x)$')
# <markdowncell>
# Try to do a polynomial fit on y(x) with different polynomial degree (Use numpy.polyfit to obtain coefficients)
#
# Plot it like this (use np.poly1d(coef)(x) to plot polynomials) 
# <codecell>
for order in xrange(9):
coeff=np.polyfit(x,y,order)
ax.plot(x,np.poly1d(coeff)(x),label='deg %d'%order)
# shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# <codecell>
| apache-2.0 |
charlesccychen/incubator-beam | sdks/python/apache_beam/io/sources_test.py | 5 | 4179 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the sources framework."""
from __future__ import absolute_import
import logging
import os
import sys
import tempfile
import unittest
import apache_beam as beam
from apache_beam import coders
from apache_beam.io import iobase
from apache_beam.io import range_trackers
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class LineSource(iobase.BoundedSource):
"""A simple source that reads lines from a given file."""
TEST_BUNDLE_SIZE = 10
def __init__(self, file_name):
self._file_name = file_name
def read(self, range_tracker):
with open(self._file_name, 'rb') as f:
start = range_tracker.start_position()
f.seek(start)
if start > 0:
f.seek(-1, os.SEEK_CUR)
start -= 1
start += len(f.readline())
current = start
for line in f:
if not range_tracker.try_claim(current):
return
yield line.rstrip('\n')
current += len(line)
def split(self, desired_bundle_size, start_position=None, stop_position=None):
assert start_position is None
assert stop_position is None
with open(self._file_name, 'rb') as f:
f.seek(0, os.SEEK_END)
size = f.tell()
bundle_start = 0
while bundle_start < size:
bundle_stop = min(bundle_start + LineSource.TEST_BUNDLE_SIZE, size)
yield iobase.SourceBundle(1, self, bundle_start, bundle_stop)
bundle_start = bundle_stop
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = 0
if stop_position is None:
with open(self._file_name, 'rb') as f:
f.seek(0, os.SEEK_END)
stop_position = f.tell()
return range_trackers.OffsetRangeTracker(start_position, stop_position)
def default_output_coder(self):
return coders.BytesCoder()
class SourcesTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Method has been renamed in Python 3
if sys.version_info[0] < 3:
cls.assertCountEqual = cls.assertItemsEqual
def _create_temp_file(self, contents):
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(contents)
return f.name
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3'
'TODO: BEAM-5627')
def test_read_from_source(self):
file_name = self._create_temp_file('aaaa\nbbbb\ncccc\ndddd')
source = LineSource(file_name)
range_tracker = source.get_range_tracker(None, None)
result = [line for line in source.read(range_tracker)]
self.assertCountEqual(['aaaa', 'bbbb', 'cccc', 'dddd'], result)
@unittest.skipIf(sys.version_info[0] == 3 and
os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
'This test still needs to be fixed on Python 3'
'TODO: BEAM-5627')
def test_run_direct(self):
file_name = self._create_temp_file('aaaa\nbbbb\ncccc\ndddd')
pipeline = TestPipeline()
pcoll = pipeline | beam.io.Read(LineSource(file_name))
assert_that(pcoll, equal_to(['aaaa', 'bbbb', 'cccc', 'dddd']))
pipeline.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 |
ymero/pulsar | pulsar/async/futures.py | 5 | 11508 | from collections import Mapping
from inspect import isgeneratorfunction
from functools import wraps, partial
from asyncio import Future, CancelledError, TimeoutError, async, sleep
from .consts import MAX_ASYNC_WHILE
from .access import get_event_loop, LOGGER, isfuture, is_async
__all__ = ['maybe_async',
'run_in_loop',
'add_errback',
'add_callback',
'task_callback',
'multi_async',
'as_coroutine',
'task',
'async',
'async_while',
'chain_future',
'future_result_exc',
'AsyncObject']
def chain_future(future, callback=None, errback=None, next=None):
'''Chain a :class:`~asyncio.Future` to an existing ``future``.
This function `chain` the ``next`` future to an existing ``future``.
When the input ``future`` receive a result the optional
``callback`` is executed and its result set as the results of ``next``.
If an exception occurs the optional ``errback`` is executed.
:param future: the original :class:`~asyncio.Future` (can be a coroutine)
:param callback: optional callback to execute on the result of ``future``
:param errback: optional callback to execute on the exception of ``future``
:param next: optional :class:`~asyncio.Future` to chain.
If not provided a new future is created
:return: the future ``next``
'''
future = async(future)
if next is None:
next = Future(loop=future._loop)
def _callback(fut):
try:
try:
result = future.result()
except Exception as exc:
if errback:
result = errback(exc)
exc = None
else:
raise
else:
if callback:
result = callback(result)
except Exception as exc:
next.set_exception(exc)
else:
if isinstance(result, Future):
chain_future(result, next=next)
else:
next.set_result(result)
future.add_done_callback(_callback)
return next
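# Illustrative sketch (not part of the original module): chaining a callback onto
# a future; ``loop`` is assumed to be an asyncio event loop and the names below
# are hypothetical.
def _chain_future_example(loop):
    source = Future(loop=loop)
    doubled = chain_future(source, callback=lambda value: value * 2)
    source.set_result(21)
    return doubled  # resolves to 42 once the loop runs the done callbacks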
def as_exception(future):
if future._exception:
return future.exception()
elif future.cancelled():
return CancelledError()
def add_errback(future, callback, loop=None):
'''Add a ``callback`` to a ``future`` executed only if an exception
or cancellation has occurred.'''
def _error_back(fut):
if fut._exception:
callback(fut.exception())
elif fut.cancelled():
callback(CancelledError())
    future = async(future, loop=loop)
future.add_done_callback(_error_back)
return future
def add_callback(future, callback, loop=None):
'''Add a ``callback`` to ``future`` executed only if an exception
has not occurred.'''
def _call_back(fut):
if not (fut._exception or fut.cancelled()):
callback(fut.result())
    future = async(future, loop=loop)
future.add_done_callback(_call_back)
return future
def future_result_exc(future):
'''Return a two elements tuple containing the future result and exception.
The :class:`.Future` must be ``done``
'''
if future.cancelled():
return None, CancelledError()
elif future._exception:
return None, future.exception()
else:
return future.result(), None
def task_callback(callback):
@wraps(callback)
def _task_callback(fut):
return async(callback(fut.result()), fut._loop)
return _task_callback
def maybe_async(value, loop=None):
'''Handle a possible asynchronous ``value``.
Return an :ref:`asynchronous instance <tutorials-coroutine>`
only if ``value`` is a generator, a :class:`.Future`.
:parameter value: the value to convert to an asynchronous instance
if it needs to.
:parameter loop: optional :class:`.EventLoop`.
:return: a :class:`.Future` or a synchronous ``value``.
'''
try:
return async(value, loop=loop)
except TypeError:
return value
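# Illustrative sketch (hypothetical, not part of the module): plain values pass
# through ``maybe_async`` unchanged, while coroutines come back wrapped in a future.
def _maybe_async_example(loop):
    assert maybe_async(42, loop=loop) == 42
    wrapped = maybe_async(sleep(0, loop=loop), loop=loop)
    assert isfuture(wrapped)
    return wrapped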
def as_coroutine(value):
if is_async(value):
value = yield from value
return value
def task(function):
'''Thread-safe decorator to run a ``function`` in an event loop.
:param function: a callable which can return coroutines,
:class:`.asyncio.Future` or synchronous data. Can be a method of
an :ref:`async object <async-object>`, in which case the loop
is given by the object ``_loop`` attribute.
:return: a :class:`~asyncio.Future`
'''
if isgeneratorfunction(function):
wrapper = function
else:
def wrapper(*args, **kw):
res = function(*args, **kw)
if res:
res = yield from res
return res
@wraps(function)
def _(*args, **kwargs):
loop = getattr(args[0], '_loop', None) if args else None
coro = wrapper(*args, **kwargs)
return async(coro, loop=loop)
return _
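# Illustrative sketch (hypothetical names): decorating a coroutine method so that
# a plain call returns a Future scheduled on the instance's ``_loop``.
def _task_example(loop):
    class _Greeter:
        _loop = loop
        @task
        def greet(self, name):
            yield from sleep(0, loop=self._loop)
            return 'hello ' + name
    return _Greeter().greet('world')  # a Future resolving to 'hello world'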
def run_in_loop(_loop, callable, *args, **kwargs):
'''Run ``callable`` in the event ``loop`` thread, thread safe.
:param _loop: The event loop where ``callable`` is run
:return: a :class:`~asyncio.Future`
'''
waiter = Future(loop=_loop)
def _():
try:
result = callable(*args, **kwargs)
except Exception as exc:
waiter.set_exception(exc)
else:
try:
future = async(result, loop=_loop)
except TypeError:
waiter.set_result(result)
else:
chain_future(future, next=waiter)
_loop.call_soon_threadsafe(_)
return waiter
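# Illustrative sketch (hypothetical): scheduling a plain callable on a loop that
# may live in another thread and waiting on the returned Future.
def _run_in_loop_example(loop):
    waiter = run_in_loop(loop, max, 1, 2, 3)
    return waiter  # resolves to 3 once ``loop`` executes the scheduled call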
def async_while(timeout, while_clause, *args):
'''The asynchronous equivalent of ``while while_clause(*args):``
Use this function within a :ref:`coroutine <coroutine>` when you need
to wait ``while_clause`` to be satisfied.
:parameter timeout: a timeout in seconds after which this function stop.
:parameter while_clause: while clause callable.
:parameter args: optional arguments to pass to the ``while_clause``
callable.
:return: A :class:`.Future`.
'''
loop = get_event_loop()
start = loop.time()
di = 0.1
interval = 0
result = while_clause(*args)
while result:
interval = min(interval+di, MAX_ASYNC_WHILE)
try:
yield from sleep(interval, loop=loop)
except TimeoutError:
pass
if timeout and loop.time() - start >= timeout:
break
result = while_clause(*args)
return result
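# Illustrative sketch (hypothetical): polling a condition from within a coroutine
# until it clears or the 30 second timeout expires.
def _async_while_example(pending_jobs):
    # use as ``yield from _async_while_example(jobs)`` inside a coroutine
    return async_while(30, lambda: bool(pending_jobs))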
# ############################################################## Bench
class Bench:
'''Execute a given number of asynchronous requests and wait for results.
'''
start = None
'''The :meth:`~asyncio.BaseEventLoop.time` when the execution starts'''
finish = None
'''The :meth:`~asyncio.BaseEventLoop.time` when the execution finishes'''
result = ()
'''Tuple of results'''
def __init__(self, times, loop=None):
self._loop = loop or get_event_loop()
self.times = times
@property
def taken(self):
'''The total time taken for execution
'''
if self.finish:
return self.finish - self.start
def __call__(self, func, *args, **kwargs):
self.start = self._loop.time()
data = (func(*args, **kwargs) for t in range(self.times))
self.result = multi_async(data, loop=self._loop)
return chain_future(self.result, callback=self._done)
def _done(self, result):
self.finish = self._loop.time()
self.result = tuple(result)
return self
# ############################################################## AsyncObject
class AsyncObject(object):
'''Interface for :ref:`async objects <async-object>`
.. attribute:: _loop
The :ref:`event loop <asyncio-event-loop>` associated with this object
.. attribute:: _logger
Optional logger instance, used by the :attr:`logger` attribute
'''
_logger = None
_loop = None
@property
def logger(self):
'''The logger for this object.
It is either the :attr:`_logger` or the logger of the :attr:`_loop`
'''
return self._logger or getattr(self._loop, 'logger', LOGGER)
def timeit(self, method, times, *args, **kwargs):
'''Useful utility for benchmarking an asynchronous ``method``.
:param method: the name of the ``method`` to execute
:param times: number of times to execute the ``method``
:param args: positional arguments to pass to the ``method``
:param kwargs: key-valued arguments to pass to the ``method``
:return: a :class:`~asyncio.Future` which results in a :class:`Bench`
object if successful
The usage is simple::
>>> b = self.timeit('asyncmethod', 100)
'''
bench = Bench(times, loop=self._loop)
return bench(getattr(self, method), *args, **kwargs)
# ############################################################## MultiFuture
class MultiFuture(Future):
'''Handle several futures at once. Thread safe.
'''
def __init__(self, data=None, loop=None, type=None, raise_on_error=True):
super().__init__(loop=loop)
self._futures = {}
self._failures = []
self._raise_on_error = raise_on_error
if data is not None:
type = type or data.__class__
if issubclass(type, Mapping):
data = data.items()
else:
type = list
data = enumerate(data)
else:
type = list
data = ()
self._stream = type()
for key, value in data:
value = self._get_set_item(key, maybe_async(value, loop))
if isfuture(value):
self._futures[key] = value
value.add_done_callback(partial(self._future_done, key))
elif self.done():
break
self._check()
@property
def failures(self):
return self._failures
# INTERNALS
def _check(self):
if not self._futures and not self.done():
self.set_result(self._stream)
def _future_done(self, key, future, inthread=False):
# called by future when future is done
# thread safe
if inthread or future._loop is self._loop:
self._futures.pop(key, None)
if not self.done():
self._get_set_item(key, future)
self._check()
else:
self._loop.call_soon_threadsafe(
self._future_done, key, future, True)
def _get_set_item(self, key, value):
if isfuture(value):
if value.done():
exc = as_exception(value)
if exc:
if self._raise_on_error:
self._futures.clear()
self.set_exception(exc)
return
else:
self._failures.append(exc)
value = exc
else:
value = value._result
stream = self._stream
if isinstance(stream, list) and key == len(stream):
stream.append(value)
else:
stream[key] = value
return value
# Backward compatibility
multi_async = MultiFuture
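# Illustrative sketch (hypothetical, not part of the module): collecting the
# results of several futures into a single future.
def _multi_async_example(loop):
    parts = [Future(loop=loop), Future(loop=loop)]
    combined = multi_async(parts, loop=loop)
    for index, fut in enumerate(parts):
        fut.set_result(index)
    return combined  # resolves to [0, 1] once the loop runs the done callbacks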
| bsd-3-clause |
jmlopez-rod/promus | promus/core/__init__.py | 1 | 5429 | """ Promus core
This package contains the object that executes commands on your
behave as well as other utility functions.
"""
import re
import os
import sys
import socket
from promus.command import exec_cmd, date
from promus.core.ssh import (
make_key,
get_keys,
get_public_key,
read_config,
write_config,
read_authorized_keys,
write_authorized_keys,
)
from promus.core.git import (
config,
describe,
repo_name,
local_path,
remote_path,
make_hook,
init,
parse_dir,
parse_acl,
check_acl,
read_acl,
parse_profile,
check_profile,
read_profile,
file_in_path,
has_access,
file_match,
clone,
)
from promus.core.util import (
is_exe,
external_executables,
check_promus_dependencies,
user_input,
encrypt_to_file,
decrypt_from_file,
wrap_msg,
make_dir,
parse_list,
tokenizer,
merge_lines,
strip,
send_mail,
)
# pylint: disable=R0902
class Promus(object):
"An instance of this object manages the commands issued over ssh. "
def __init__(self):
# Host information
self.host = socket.gethostname()
self.alias = config('host.alias')
self.home = os.environ['HOME']
self.master = os.environ['USER']
self.master_name = config('user.name')
self.master_email = config('user.email')
# Guest information
self.guest = None
self.guest_name = None
self.guest_email = None
self.guest_alias = None
self.cmd = None
self.cmd_token = None
self.cmd_name = None
# Setting up log file
self.path = '%s/.promus' % self.home
make_dir(self.path)
self.log_file = open('%s/promus.log' % self.path, 'a')
# Setting up functions based on command name
self._exec = dict()
self._exec['git-receive-pack'] = exec_git
self._exec['git-upload-pack'] = exec_git
def log(self, msg):
"Write a message to the log file. "
sys.stderr.write("[PROMUS]: %s\n" % msg)
msg = '[%s:~ %s]$ %s\n' % (date(True), self.guest, msg)
self.log_file.write(msg)
def dismiss(self, msg, status):
"""Print msg to the standard error stream (sys.stderr), as
well as to the log file and exit with the provided status. """
self.log(msg)
self.log_file.close()
exit(status)
def execute(self, cmd_name):
"""Execute one of the commands promus is allowed to execute.
These are defined in the init function. If a command is not
there then the deny function will be executed instead. """
self._exec.get(cmd_name, deny)(self)
def exec_cmd(self, cmd, verbose=False):
"""Run a subprocess and return its output, errors and exit
code. It also stores the information about the guest who
executed the command inside the file
`~/.promus/promus.last`."""
self.log("EXEC>> %s" % cmd)
with open('%s/promus.last' % self.path, 'w') as tmpf:
tmpf.write("%s\n" % self.guest_email)
tmpf.write("%s\n" % self.guest)
tmpf.write("%s\n" % self.guest_name)
tmpf.write("%s\n" % self.guest_alias)
tmpf.write("%s" % self.cmd)
return exec_cmd(cmd, verbose)
def attend_last(self):
"""Reads the file containing the last guest and sets the
guest info in order to proceed writing logs with that name.
"""
with open('%s/promus.last' % self.path, 'r') as tmpf:
info = tmpf.read()
[self.guest_email, self.guest, self.guest_name,
self.guest_alias, self.cmd] = info.split('\n')
self.cmd_token = self.cmd.split()
self.cmd_name = self.cmd_token[0]
def _get_cmd(self):
"Check to see if a command was given. Exit if it is not present. "
if 'SSH_ORIGINAL_COMMAND' not in os.environ:
msg = "GET_CMD-ERROR>> SSH_ORIGINAL_COMMAND not found."
self.dismiss(msg, 1)
self.cmd = os.environ['SSH_ORIGINAL_COMMAND']
pattern = re.compile('.*?[;&|]')
if pattern.search(self.cmd):
msg = "GET_CMD-ERROR>> More than one command: %s" % self.cmd
self.dismiss(msg, 1)
self.cmd_token = self.cmd.split()
self.cmd_name = self.cmd_token[0]
def greet(self, info):
"Handle the guest request. "
[self.guest_email, self.guest,
self.guest_name, self.guest_alias] = info.split(',')
self.log("GREET>> Connected as %s" % self.guest_email)
self._get_cmd()
if self.guest_email == self.master_email:
self.exec_cmd(self.cmd, True)
else:
self.execute(self.cmd_name)
self.dismiss("GREET>> done ...", 0)
def deny(prs):
"Promus object default action. "
msg = "EXEC-ERROR>> Not enough permissions to run: '%s'" % prs.cmd
prs.dismiss(msg, 1)
def exec_git(prs):
"""Executes a git command. """
git_dir = os.path.expanduser(prs.cmd_token[1][1:-1])
acl = read_acl(git_dir)
if isinstance(acl, str):
msg = "EXEC_GIT-ERROR>> acl error: %s" % acl
prs.dismiss(msg, 1)
if prs.guest in acl['user']: # acl['user'] contains acl['admin']
prs.exec_cmd(prs.cmd, True)
else:
msg = "EXEC_GIT-ERROR>> not in acl for `%s`" % git_dir
prs.dismiss(msg, 1)
| bsd-3-clause |
dimagi/django-digest | django_digest/__init__.py | 1 | 6603 | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import random
import time
from django.http import HttpResponse
from django.contrib.auth.signals import user_login_failed
import python_digest
from django_digest.utils import get_backend, get_setting, DEFAULT_REALM
import six
from six.moves import range
_l = logging.getLogger(__name__)
_l.setLevel(logging.DEBUG)
class DefaultLoginFactory(object):
def confirmed_logins_for_user(self, user):
return [login for login in
[user.username, user.username.lower(), user.email,
user.email and user.email.lower()] if login]
def unconfirmed_logins_for_user(self, user):
return []
class NoEmailLoginFactory(object):
def confirmed_logins_for_user(self, user):
return [login for login in
[user.username, user.username.lower()] if login]
def unconfirmed_logins_for_user(self, user):
return []
def _send_fail_signal(request, username):
user_login_failed.send(
sender=__name__,
credentials={'username': username},
request=request)
class HttpDigestAuthenticator(object):
def __init__(self,
account_storage=None,
nonce_storage=None,
realm=None,
timeout=None,
enforce_nonce_count=None,
failure_callback=_send_fail_signal):
        if enforce_nonce_count is not None:
self._enforce_nonce_count = enforce_nonce_count
else:
self._enforce_nonce_count = get_setting('DIGEST_ENFORCE_NONCE_COUNT', True)
self.realm = realm or get_setting('DIGEST_REALM', DEFAULT_REALM)
self.timeout = timeout or get_setting('DIGEST_NONCE_TIMEOUT_IN_SECONDS', 5*60)
self._account_storage = (account_storage or get_backend(
'DIGEST_ACCOUNT_BACKEND', 'django_digest.backend.storage.AccountStorage'))
self._nonce_storage = (nonce_storage or get_backend(
'DIGEST_NONCE_BACKEND', 'django_digest.backend.storage.NonceStorage'))
self.secret_key = get_setting('SECRET_KEY')
self.failure_callback = failure_callback
@staticmethod
def contains_digest_credentials(request):
return ('HTTP_AUTHORIZATION' in request.META and
python_digest.is_digest_credential(request.META['HTTP_AUTHORIZATION']))
def _store_nonce(self, user, nonce, nonce_count):
if self._enforce_nonce_count:
return self._nonce_storage.store_nonce(user, nonce, nonce_count)
else:
return self._nonce_storage.store_nonce(user, nonce, None)
def _update_existing_nonce(self, user, nonce, nonce_count):
if self._enforce_nonce_count:
return self._nonce_storage.update_existing_nonce(user, nonce, nonce_count)
else:
return self._nonce_storage.update_existing_nonce(user, nonce, None)
def authenticate(self, request):
        if 'HTTP_AUTHORIZATION' not in request.META:
return False
if not python_digest.is_digest_credential(request.META['HTTP_AUTHORIZATION']):
return False
try:
if not isinstance(request.META['HTTP_AUTHORIZATION'], six.text_type):
request.META['HTTP_AUTHORIZATION'].decode('utf-8')
except UnicodeDecodeError:
return False
digest_response = python_digest.parse_digest_credentials(
request.META['HTTP_AUTHORIZATION'])
if not digest_response:
_l.debug('authentication failure: supplied digest credentials could not be ' \
'parsed: "%s".' % request.META['HTTP_AUTHORIZATION'])
return False
if not digest_response.realm == self.realm:
_l.debug('authentication failure: supplied realm "%s" does not match ' \
'configured realm "%s".' % ( digest_response.realm, self.realm))
return False
if not python_digest.validate_nonce(digest_response.nonce, self.secret_key):
_l.debug('authentication failure: nonce validation failed.')
return False
partial_digest = self._account_storage.get_partial_digest(digest_response.username)
if not partial_digest:
_l.debug('authentication failure: no partial digest available for user "%s".' \
% digest_response.username)
return False
calculated_request_digest = python_digest.calculate_request_digest(
method=request.method, digest_response=digest_response,
partial_digest=partial_digest)
if not calculated_request_digest == digest_response.response:
_l.debug('authentication failure: supplied request digest does not match ' \
'calculated request digest.')
if self.failure_callback:
self.failure_callback(request, digest_response.username)
return False
if not python_digest.validate_uri(digest_response.uri, request.path):
_l.debug('authentication failure: digest authentication uri value "%s" does not ' \
'match value "%s" from HTTP request line.' % (digest_response.uri,
request.path))
return False
user = self._account_storage.get_user(digest_response.username)
if not self._update_existing_nonce(user, digest_response.nonce, digest_response.nc):
if (python_digest.get_nonce_timestamp(digest_response.nonce) + self.timeout <
time.time()):
_l.debug('authentication failure: attempt to establish a new session with ' \
'a stale nonce.')
return False
if not self._store_nonce(user, digest_response.nonce, digest_response.nc):
                _l.debug('authentication failure: attempt to establish a previously used ' \
                         'or out-of-order nonce count.')
return False
request.user = user
return True
def build_challenge_response(self, stale=False):
response = HttpResponse('Authorization Required',
content_type='text/plain', status=401)
opaque = ''.join([random.choice('0123456789ABCDEF') for x in range(32)])
response["WWW-Authenticate"] = python_digest.build_digest_challenge(
time.time(), self.secret_key, self.realm, opaque, stale)
return response
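# Illustrative sketch (hypothetical view, not part of django_digest's public API):
# guarding a Django view with the authenticator defined above.
def _example_digest_protected_view(request):
    authenticator = HttpDigestAuthenticator()
    if not authenticator.authenticate(request):
        return authenticator.build_challenge_response()
    return HttpResponse('authenticated as %s' % request.user)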
| bsd-3-clause |
amarant/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_stream_hixie75.py | 496 | 2285 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.stream import StreamHixie75
from test.test_msgutil import _create_request_hixie75
class StreamHixie75Test(unittest.TestCase):
"""A unittest for StreamHixie75 class."""
def test_payload_length(self):
for length, bytes in ((0, '\x00'), (0x7f, '\x7f'), (0x80, '\x81\x00'),
(0x1234, '\x80\xa4\x34')):
test_stream = StreamHixie75(_create_request_hixie75(bytes))
self.assertEqual(
length, test_stream._read_payload_length_hixie75())
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
alshedivat/tensorflow | tensorflow/python/data/experimental/kernel_tests/sql_dataset_test_base.py | 8 | 4409 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sqlite3
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SqlDatasetTestBase(test_base.DatasetTestBase):
"""Base class for setting up and testing SqlDataset."""
def _createSqlDataset(self, output_types, num_repeats=1):
dataset = readers.SqlDataset(self.driver_name, self.data_source_name,
self.query, output_types).repeat(num_repeats)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
return init_op, get_next
def setUp(self):
self.data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
self.driver_name = array_ops.placeholder_with_default(
array_ops.constant("sqlite", dtypes.string), shape=[])
self.query = array_ops.placeholder(dtypes.string, shape=[])
conn = sqlite3.connect(self.data_source_name)
c = conn.cursor()
c.execute("DROP TABLE IF EXISTS students")
c.execute("DROP TABLE IF EXISTS people")
c.execute("DROP TABLE IF EXISTS townspeople")
c.execute(
"CREATE TABLE IF NOT EXISTS students (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), motto VARCHAR(100), "
"school_id VARCHAR(100), favorite_nonsense_word VARCHAR(100), "
"desk_number INTEGER, income INTEGER, favorite_number INTEGER, "
"favorite_big_number INTEGER, favorite_negative_number INTEGER, "
"favorite_medium_sized_number INTEGER, brownie_points INTEGER, "
"account_balance INTEGER, registration_complete INTEGER)")
c.executemany(
"INSERT INTO students (first_name, last_name, motto, school_id, "
"favorite_nonsense_word, desk_number, income, favorite_number, "
"favorite_big_number, favorite_negative_number, "
"favorite_medium_sized_number, brownie_points, account_balance, "
"registration_complete) "
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
[("John", "Doe", "Hi!", "123", "n\0nsense", 9, 0, 2147483647,
9223372036854775807, -2, 32767, 0, 0, 1),
("Jane", "Moe", "Hi again!", "1000", "nonsense\0", 127, -20000,
-2147483648, -9223372036854775808, -128, -32768, 255, 65535, 0)])
c.execute(
"CREATE TABLE IF NOT EXISTS people (id INTEGER NOT NULL PRIMARY KEY, "
"first_name VARCHAR(100), last_name VARCHAR(100), state VARCHAR(100))")
c.executemany(
"INSERT INTO PEOPLE (first_name, last_name, state) VALUES (?, ?, ?)",
[("Benjamin", "Franklin", "Pennsylvania"), ("John", "Doe",
"California")])
c.execute(
"CREATE TABLE IF NOT EXISTS townspeople (id INTEGER NOT NULL PRIMARY "
"KEY, first_name VARCHAR(100), last_name VARCHAR(100), victories "
"FLOAT, accolades FLOAT, triumphs FLOAT)")
c.executemany(
"INSERT INTO townspeople (first_name, last_name, victories, "
"accolades, triumphs) VALUES (?, ?, ?, ?, ?)",
[("George", "Washington", 20.00,
1331241.321342132321324589798264627463827647382647382643874,
9007199254740991.0),
("John", "Adams", -19.95,
1331241321342132321324589798264627463827647382647382643874.0,
9007199254740992.0)])
conn.commit()
conn.close()
| apache-2.0 |
zhangjunfang/eclipse-dir | foundation-springMVC/src/main/resources/views/scripts/lib/fckeditor/editor/filemanager/connectors/py/fckoutput.py | 46 | 3847 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=string.replace):
"""
Converts the special characters '<', '>', and '&'.
RFC 1866 specifies that these characters be represented
	in HTML as &lt; &gt; and &amp; respectively. In Python
1.5 we use the new string.replace() function for speed.
"""
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
return text
def convertToXmlAttribute(value):
if (value is None):
value = ""
return escape(value)
class BaseHttpMixin(object):
def setHttpHeaders(self, content_type='text/xml'):
"Purpose: to prepare the headers for the xml to return"
# Prevent the browser from caching the result.
# Date in the past
self.setHeader('Expires','Mon, 26 Jul 1997 05:00:00 GMT')
# always modified
self.setHeader('Last-Modified',strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
# HTTP/1.1
self.setHeader('Cache-Control','no-store, no-cache, must-revalidate')
self.setHeader('Cache-Control','post-check=0, pre-check=0')
# HTTP/1.0
self.setHeader('Pragma','no-cache')
# Set the response format.
self.setHeader( 'Content-Type', content_type + '; charset=utf-8' )
return
class BaseXmlMixin(object):
def createXmlHeader(self, command, resourceType, currentFolder, url):
"Purpose: returns the xml header"
self.setHttpHeaders()
# Create the XML document header
s = """<?xml version="1.0" encoding="utf-8" ?>"""
# Create the main connector node
s += """<Connector command="%s" resourceType="%s">""" % (
command,
resourceType
)
# Add the current folder node
s += """<CurrentFolder path="%s" url="%s" />""" % (
convertToXmlAttribute(currentFolder),
convertToXmlAttribute(url),
)
return s
def createXmlFooter(self):
"Purpose: returns the xml footer"
return """</Connector>"""
def sendError(self, number, text):
"Purpose: in the event of an error, return an xml based error"
self.setHttpHeaders()
return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
"""<Connector>""" +
self.sendErrorNode (number, text) +
"""</Connector>""" )
def sendErrorNode(self, number, text):
return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
self.setHttpHeaders("text/html")
"This is the function that sends the results of the uploading process"
"Minified version of the document.domain automatic fix script (#1919)."
"The original script can be found at _dev/domain_fix_template.js"
return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
'errorNumber': errorNo,
'fileUrl': fileUrl.replace ('"', '\\"'),
'fileName': fileName.replace ( '"', '\\"' ) ,
'customMsg': customMsg.replace ( '"', '\\"' ),
}
| bsd-2-clause |
batisteo/django-countries-french | django_countries/tests/fields.py | 4 | 1566 | from django.test import TestCase
from django_countries import settings
class TestCountryField(TestCase):
def create_person(self, country='NZ'):
from django_countries.tests import Person
return Person.objects.create(name='Chris Beaven', country=country)
def test_logic(self):
person = self.create_person()
self.assertEqual(person.country, 'NZ')
self.assertNotEqual(person.country, 'ZZ')
self.assert_(person.country < 'OA')
self.assert_(person.country > 'NY')
self.assert_(person.country)
person.country = ''
self.assertFalse(person.country)
def test_unicode(self):
person = self.create_person()
self.assertEqual(unicode(person.country), 'NZ')
def test_name(self):
person = self.create_person()
self.assertEqual(person.country.name, u'New Zealand')
def test_flag(self):
person = self.create_person()
expected_url = settings.FLAG_URL % {'code': 'nz', 'code_upper': 'NZ'}
self.assertEqual(person.country.flag, expected_url)
def test_blank(self):
from django_countries.tests import Person
person = self.create_person(country=None)
self.assertEqual(person.country, '')
person = Person.objects.get(pk=person.pk)
self.assertEqual(person.country, '')
def test_len(self):
person = self.create_person()
self.assertEqual(len(person.country), 2)
person = self.create_person(country=None)
self.assertEqual(len(person.country), 0)
| mit |
zhoulingjun/django | tests/gis_tests/gdal_tests/test_geom.py | 256 | 20748 | import json
import unittest
from binascii import b2a_hex
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.utils.six.moves import range
from ..test_data import TestDataMixin
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
if HAS_GDAL:
from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType,
GDALException, OGRIndexError, SpatialReference, CoordTransform,
GDAL_VERSION)
@skipUnless(HAS_GDAL, "GDAL is required")
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
OGRGeomType(1)
OGRGeomType(7)
OGRGeomType('point')
OGRGeomType('GeometrycollectioN')
OGRGeomType('LINearrING')
OGRGeomType('Unknown')
# Should throw TypeError on this input
self.assertRaises(GDALException, OGRGeomType, 23)
self.assertRaises(GDALException, OGRGeomType, 'fooD')
self.assertRaises(GDALException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(OGRGeomType(1), OGRGeomType(1))
self.assertEqual(OGRGeomType(7), 'GeometryCollection')
self.assertEqual(OGRGeomType('point'), 'POINT')
self.assertNotEqual(OGRGeomType('point'), 2)
self.assertEqual(OGRGeomType('unknown'), 0)
self.assertEqual(OGRGeomType(6), 'MULtiPolyGON')
self.assertEqual(OGRGeomType(1), OGRGeomType('point'))
self.assertNotEqual(OGRGeomType('POINT'), OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Geometry').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertIsNone(OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertEqual(OGRGeomType(wkb25bit + 1), 'Point25D')
self.assertEqual(OGRGeomType('MultiLineString25D'), (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
# No test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
# Test input with some garbage content (but valid json) (#15529)
geom = OGRGeometry('{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}')
self.assertIsInstance(geom, OGRGeometry)
def test_points(self):
"Testing Point objects."
OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(linestr, OGRGeometry(ls.wkt))
self.assertNotEqual(linestr, prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(mlinestr, OGRGeometry(mls.wkt))
self.assertNotEqual(mlinestr, prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
# self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(lr, OGRGeometry(rr.wkt))
self.assertNotEqual(lr, prev)
prev = lr
def test_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = OGRGeometry.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(poly, OGRGeometry(p.wkt))
self.assertNotEqual(poly, prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(GDALException):
poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test_multipolygons(self):
"Testing MultiPolygon objects."
OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
# or transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertTrue(a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(GDALException, mp.add, pnt)
        # GeometryCollection.add may take an OGRGeometry (if another collection
        # of the same type, all child geoms will be added individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3):
self.assertEqual(mpoly, tmp)
def test_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test_pickle(self):
"Testing pickle support."
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = pickle.loads(pickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertIsNotNone(OGRGeometry('POINT(0 0)'))
self.assertNotEqual(OGRGeometry('LINESTRING(0 0, 1 1)'), 3)
| bsd-3-clause |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/graph_editor/__init__.py | 112 | 1658 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Graph Editor.
See the @{$python/contrib.graph_editor} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.graph_editor.edit import *
from tensorflow.contrib.graph_editor.reroute import *
from tensorflow.contrib.graph_editor.select import *
from tensorflow.contrib.graph_editor.subgraph import *
from tensorflow.contrib.graph_editor.transform import *
from tensorflow.contrib.graph_editor.util import *
# pylint: enable=wildcard-import
# some useful aliases
# pylint: disable=g-bad-import-order
from tensorflow.contrib.graph_editor import subgraph as _subgraph
from tensorflow.contrib.graph_editor import util as _util
# pylint: enable=g-bad-import-order
ph = _util.make_placeholder_from_dtype_and_shape
sgv = _subgraph.make_view
sgv_scope = _subgraph.make_view_from_scope
del absolute_import
del division
del print_function
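# --- Illustrative usage (sketch, not part of the original module) ------------
# The aliases defined above are typically used along these lines; the ops,
# graph and shapes named here are assumptions for illustration only:
#
#     import tensorflow.contrib.graph_editor as ge
#     view = ge.sgv(op_list)                      # SubGraphView from a list of ops
#     scoped = ge.sgv_scope("encoder", graph=g)   # view of every op in a name scope
#     x = ge.ph(tf.float32, shape=(None, 10))     # convenience placeholder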
| mit |
kampanita/pelisalacarta | python/main-classic/lib/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
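# --- Illustrative sketch (not part of the original module) -------------------
# A charset prober typically drives the state machine one byte at a time and
# reacts to the returned state; eError and eItsMe are assumed to live in
# .constants alongside the eStart value imported above.
def _demo_feed(coding_sm, byte_buf):
    from .constants import eError, eItsMe
    for c in byte_buf:
        state = coding_sm.next_state(c)
        if state == eError:
            return False  # the byte sequence is illegal for this encoding
        if state == eItsMe:
            return True   # the encoding has been positively identified
    return None  # not enough evidence either way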
| gpl-3.0 |
mitocw/edx-platform | common/djangoapps/entitlements/api/v1/filters.py | 3 | 1208 |
from django_filters import rest_framework as filters
from entitlements.models import CourseEntitlement
class CharListFilter(filters.CharFilter):
""" Filters a field via a comma-delimited list of values. """
def filter(self, qs, value):
if value not in (None, ''):
value = value.split(',')
return super(CharListFilter, self).filter(qs, value)
class UUIDListFilter(CharListFilter):
""" Filters a field via a comma-delimited list of UUIDs. """
def __init__(self, field_name='uuid', label=None, widget=None, method=None, lookup_expr='in', required=False,
distinct=False, exclude=False, **kwargs):
super(UUIDListFilter, self).__init__(
field_name=field_name,
label=label,
widget=widget,
method=method,
lookup_expr=lookup_expr,
required=required,
distinct=distinct,
exclude=exclude,
**kwargs
)
class CourseEntitlementFilter(filters.FilterSet):
uuid = UUIDListFilter()
user = filters.CharFilter(field_name='user__username')
class Meta:
model = CourseEntitlement
fields = ('uuid', 'user')
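# Illustrative usage (sketch, not part of the original module): attached to a
# DRF view via its filter backend, this filter set supports requests such as
#
#     GET /api/entitlements/?uuid=<uuid-1>,<uuid-2>&user=some_username
#
# (the URL path is an assumption); UUIDListFilter splits the comma-delimited
# ``uuid`` values and applies an ``in`` lookup, while ``user`` matches on the
# related username.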
| agpl-3.0 |
SilverIce/JContainers | dep/boost/tools/build/src/tools/stage.py | 17 | 13457 | # Status: ported.
# Base revision 64444.
#
# Copyright 2003 Dave Abrahams
# Copyright 2005, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006, 2010 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This module defines the 'install' rule, used to copy a set of targets to a
# single location.
import b2.build.feature as feature
import b2.build.targets as targets
import b2.build.property as property
import b2.build.property_set as property_set
import b2.build.generators as generators
import b2.build.virtual_target as virtual_target
from b2.manager import get_manager
from b2.util.sequence import unique
from b2.util import bjam_signature
import b2.build.type
import os.path
import re
import types
feature.feature('install-dependencies', ['off', 'on'], ['incidental'])
feature.feature('install-type', [], ['free', 'incidental'])
feature.feature('install-source-root', [], ['free', 'path'])
feature.feature('so-version', [], ['free', 'incidental'])
# If 'on', version symlinks for shared libraries will not be created. Affects
# Unix builds only.
feature.feature('install-no-version-symlinks', ['on'], ['optional', 'incidental'])
class InstallTargetClass(targets.BasicTarget):
def update_location(self, ps):
"""If <location> is not set, sets it based on the project data."""
loc = ps.get('location')
if not loc:
loc = os.path.join(self.project().get('location'), self.name())
ps = ps.add_raw(["<location>" + loc])
return ps
def adjust_properties(self, target, build_ps):
a = target.action()
properties = []
if a:
ps = a.properties()
properties = ps.all()
# Unless <hardcode-dll-paths>true is in properties, which can happen
# only if the user has explicitly requested it, nuke all <dll-path>
# properties.
if build_ps.get('hardcode-dll-paths') != ['true']:
properties = [p for p in properties if p.feature().name() != 'dll-path']
# If any <dll-path> properties were specified for installing, add
# them.
properties.extend(build_ps.get_properties('dll-path'))
# Also copy <linkflags> feature from current build set, to be used
# for relinking.
properties.extend(build_ps.get_properties('linkflags'))
# Remove the <tag> feature on original targets.
# And <location>. If stage target has another stage target in
# sources, then we shall get virtual targets with the <location>
# property set.
properties = [p for p in properties
if not p.feature().name() in ['tag', 'location']]
properties.extend(build_ps.get_properties('dependency'))
properties.extend(build_ps.get_properties('location'))
properties.extend(build_ps.get_properties('install-no-version-symlinks'))
d = build_ps.get_properties('install-source-root')
# Make the path absolute: we shall use it to compute relative paths and
# making the path absolute will help.
if d:
p = d[0]
properties.append(property.Property(p.feature(), os.path.abspath(p.value())))
return property_set.create(properties)
def construct(self, name, source_targets, ps):
source_targets = self.targets_to_stage(source_targets, ps)
ps = self.update_location(ps)
ename = ps.get('name')
if ename:
ename = ename[0]
if ename and len(source_targets) > 1:
get_manager().errors()("When <name> property is used in 'install', only one source is allowed")
result = []
for i in source_targets:
staged_targets = []
new_ps = self.adjust_properties(i, ps)
# See if something special should be done when staging this type. It
# is indicated by the presence of a special "INSTALLED_" type.
t = i.type()
if t and b2.build.type.registered("INSTALLED_" + t):
if ename:
get_manager().errors()("In 'install': <name> property specified with target that requires relinking.")
else:
(r, targets) = generators.construct(self.project(), name, "INSTALLED_" + t,
new_ps, [i])
assert isinstance(r, property_set.PropertySet)
staged_targets.extend(targets)
else:
staged_targets.append(copy_file(self.project(), ename, i, new_ps))
if not staged_targets:
get_manager().errors()("Unable to generate staged version of " + i)
result.extend(get_manager().virtual_targets().register(t) for t in staged_targets)
return (property_set.empty(), result)
def targets_to_stage(self, source_targets, ps):
"""Given the list of source targets explicitly passed to 'stage', returns the
list of targets which must be staged."""
result = []
# Traverse the dependencies, if needed.
if ps.get('install-dependencies') == ['on']:
source_targets = self.collect_targets(source_targets)
# Filter the target types, if needed.
included_types = ps.get('install-type')
for r in source_targets:
ty = r.type()
if ty:
# Do not stage searched libs.
if ty != "SEARCHED_LIB":
if included_types:
if self.include_type(ty, included_types):
result.append(r)
else:
result.append(r)
elif not included_types:
# Don't install typeless target if there is an explicit list of
# allowed types.
result.append(r)
return result
# CONSIDER: figure out why we can not use virtual-target.traverse here.
#
def collect_targets(self, targets):
s = [t.creating_subvariant() for t in targets]
        s = unique(filter(lambda l: l is not None, s))
result = set(targets)
for i in s:
i.all_referenced_targets(result)
result2 = []
for r in result:
if isinstance(r, property.Property):
if r.feature().name() != 'use':
result2.append(r.value())
else:
result2.append(r)
result2 = unique(result2)
return result2
# Returns true iff 'type' is subtype of some element of 'types-to-include'.
#
def include_type(self, type, types_to_include):
return any(b2.build.type.is_subtype(type, ti) for ti in types_to_include)
# Creates a copy of target 'source'. The 'properties' object should have a
# <location> property which specifies where the target must be placed.
#
def copy_file(project, name, source, ps):
if not name:
name = source.name()
relative = ""
new_a = virtual_target.NonScanningAction([source], "common.copy", ps)
source_root = ps.get('install-source-root')
if source_root:
source_root = source_root[0]
# Get the real path of the target. We probably need to strip relative
# path from the target name at construction.
path = os.path.join(source.path(), os.path.dirname(name))
# Make the path absolute. Otherwise, it would be hard to compute the
# relative path. The 'source-root' is already absolute, see the
# 'adjust-properties' method above.
path = os.path.abspath(path)
relative = os.path.relpath(path, source_root)
name = os.path.join(relative, os.path.basename(name))
return virtual_target.FileTarget(name, source.type(), project, new_a, exact=True)
def symlink(name, project, source, ps):
a = virtual_target.Action([source], "symlink.ln", ps)
return virtual_target.FileTarget(name, source.type(), project, a, exact=True)
def relink_file(project, source, ps):
action = source[0].action()
cloned_action = virtual_target.clone_action(action, project, "", ps)
targets = cloned_action.targets()
# We relink only on Unix, where exe or shared lib is always a single file.
assert len(targets) == 1
return targets[0]
# Declare installed version of the EXE type. Generator for this type will cause
# relinking to the new location.
b2.build.type.register('INSTALLED_EXE', [], 'EXE')
class InstalledExeGenerator(generators.Generator):
def __init__(self):
generators.Generator.__init__(self, "install-exe", False, ['EXE'], ['INSTALLED_EXE'])
def run(self, project, name, ps, source):
        need_relink = False
if ps.get('os') in ['NT', 'CYGWIN'] or ps.get('target-os') in ['windows', 'cygwin']:
# Never relink
pass
else:
# See if the dll-path properties are not changed during
# install. If so, copy, don't relink.
need_relink = source[0].action() and ps.get('dll-path') != source[0].action().properties().get('dll-path')
if need_relink:
return [relink_file(project, source, ps)]
else:
return [copy_file(project, None, source[0], ps)]
generators.register(InstalledExeGenerator())
# Installing a shared link on Unix might cause a creation of versioned symbolic
# links.
b2.build.type.register('INSTALLED_SHARED_LIB', [], 'SHARED_LIB')
class InstalledSharedLibGenerator(generators.Generator):
def __init__(self):
generators.Generator.__init__(self, 'install-shared-lib', False, ['SHARED_LIB'], ['INSTALLED_SHARED_LIB'])
def run(self, project, name, ps, source):
source = source[0]
if ps.get('os') in ['NT', 'CYGWIN'] or ps.get('target-os') in ['windows', 'cygwin']:
copied = copy_file(project, None, source, ps)
return [get_manager().virtual_targets().register(copied)]
else:
a = source.action()
if not a:
# Non-derived file, just copy.
copied = copy_file(project, None, source, ps)
else:
need_relink = ps.get('dll-path') != source.action().properties().get('dll-path')
if need_relink:
# Rpath changed, need to relink.
copied = relink_file(project, source, ps)
else:
copied = copy_file(project, None, source, ps)
result = [get_manager().virtual_targets().register(copied)]
# If the name is in the form NNN.XXX.YYY.ZZZ, where all 'X', 'Y' and
# 'Z' are numbers, we need to create NNN.XXX and NNN.XXX.YYY
# symbolic links.
m = re.match("(.*)\\.([0123456789]+)\\.([0123456789]+)\\.([0123456789]+)$",
copied.name());
if m:
# Symlink without version at all is used to make
# -lsome_library work.
result.append(symlink(m.group(1), project, copied, ps))
# Symlinks of some libfoo.N and libfoo.N.M are used so that
# library can found at runtime, if libfoo.N.M.X has soname of
# libfoo.N. That happens when the library makes some binary
# compatibility guarantees. If not, it is possible to skip those
# symlinks.
if ps.get('install-no-version-symlinks') != ['on']:
result.append(symlink(m.group(1) + '.' + m.group(2), project, copied, ps))
result.append(symlink(m.group(1) + '.' + m.group(2) + '.' + m.group(3),
project, copied, ps))
return result
generators.register(InstalledSharedLibGenerator())
# Main target rule for 'install'.
#
@bjam_signature((["name"], ["sources", "*"], ["requirements", "*"],
["default_build", "*"], ["usage_requirements", "*"]))
def install(name, sources, requirements=[], default_build=[], usage_requirements=[]):
requirements = requirements[:]
# Unless the user has explicitly asked us to hardcode dll paths, add
# <hardcode-dll-paths>false in requirements, to override default value.
if not '<hardcode-dll-paths>true' in requirements:
requirements.append('<hardcode-dll-paths>false')
if any(r.startswith('<tag>') for r in requirements):
get_manager().errors()("The <tag> property is not allowed for the 'install' rule")
from b2.manager import get_manager
t = get_manager().targets()
project = get_manager().projects().current()
return t.main_target_alternative(
InstallTargetClass(name, project,
t.main_target_sources(sources, name),
t.main_target_requirements(requirements, project),
t.main_target_default_build(default_build, project),
t.main_target_usage_requirements(usage_requirements, project)))
get_manager().projects().add_rule("install", install)
get_manager().projects().add_rule("stage", install)
| mit |
jostep/tensorflow | tensorflow/contrib/metrics/__init__.py | 32 | 6185 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.
See the @{$python/contrib.metrics} guide.
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@set_difference
@@set_intersection
@@set_size
@@set_union
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
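# Illustrative usage (sketch, not part of the original module): streaming
# metrics return a (value_op, update_op) pair; update_op is run once per
# batch and value_op reports the running aggregate. The tensors and session
# below are assumptions for illustration only:
#
#     accuracy, update_op = streaming_accuracy(predictions, labels)
#     sess.run(tf.local_variables_initializer())
#     for batch_feed in batches:
#         sess.run(update_op, feed_dict=batch_feed)
#     print(sess.run(accuracy))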
| apache-2.0 |
ASCrookes/django | django/utils/decorators.py | 60 | 5921 | "Functions that help with dynamically creating decorators for views."
try:
from contextlib import ContextDecorator
except ImportError:
ContextDecorator = None
from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps
from django.utils import six
class classonlymethod(classmethod):
def __get__(self, instance, owner):
if instance is not None:
raise AttributeError("This method is available only on the class, not on instances.")
return super(classonlymethod, self).__get__(instance, owner)
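# Illustrative behaviour (sketch, not part of the original module):
#
#     class MyView(object):
#         @classonlymethod
#         def as_view(cls, **initkwargs):
#             ...
#
#     MyView.as_view()    # fine: accessed on the class
#     MyView().as_view    # raises AttributeError: accessed on an instance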
def method_decorator(decorator):
"""
Converts a function decorator into a method decorator
"""
# 'func' is a function at the time it is passed to _dec, but will eventually
# be a method of the class it is defined on.
def _dec(func):
def _wrapper(self, *args, **kwargs):
@decorator
def bound_func(*args2, **kwargs2):
return func.__get__(self, type(self))(*args2, **kwargs2)
# bound_func has the signature that 'decorator' expects i.e. no
# 'self' argument, but it is a closure over self so it can call
# 'func' correctly.
return bound_func(*args, **kwargs)
# In case 'decorator' adds attributes to the function it decorates, we
# want to copy those. We don't have access to bound_func in this scope,
# but we can cheat by using it on a dummy function.
@decorator
def dummy(*args, **kwargs):
pass
update_wrapper(_wrapper, dummy)
# Need to preserve any existing attributes of 'func', including the name.
update_wrapper(_wrapper, func)
return _wrapper
update_wrapper(_dec, decorator, assigned=available_attrs(decorator))
# Change the name to aid debugging.
if hasattr(decorator, '__name__'):
_dec.__name__ = 'method_decorator(%s)' % decorator.__name__
else:
_dec.__name__ = 'method_decorator(%s)' % decorator.__class__.__name__
return _dec
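# Illustrative usage (sketch, not part of the original module): the common
# pattern is adapting a function decorator such as
# django.contrib.auth.decorators.login_required for a class-based view method:
#
#     class ProtectedView(View):
#         @method_decorator(login_required)
#         def dispatch(self, request, *args, **kwargs):
#             return super(ProtectedView, self).dispatch(request, *args, **kwargs)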
def decorator_from_middleware_with_args(middleware_class):
"""
Like decorator_from_middleware, but returns a function
that accepts the arguments to be passed to the middleware_class.
Use like::
cache_page = decorator_from_middleware_with_args(CacheMiddleware)
# ...
@cache_page(3600)
def my_view(request):
# ...
"""
return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
"""
Given a middleware class (not an instance), returns a view decorator. This
lets you use middleware functionality on a per-view basis. The middleware
is created with no params passed.
"""
return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This is required as a workaround for http://bugs.python.org/issue3445
under Python 2.
"""
if six.PY3:
return WRAPPER_ASSIGNMENTS
else:
return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
def make_middleware_decorator(middleware_class):
def _make_decorator(*m_args, **m_kwargs):
middleware = middleware_class(*m_args, **m_kwargs)
def _decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if hasattr(middleware, 'process_request'):
result = middleware.process_request(request)
if result is not None:
return result
if hasattr(middleware, 'process_view'):
result = middleware.process_view(request, view_func, args, kwargs)
if result is not None:
return result
try:
response = view_func(request, *args, **kwargs)
except Exception as e:
if hasattr(middleware, 'process_exception'):
result = middleware.process_exception(request, e)
if result is not None:
return result
raise
if hasattr(response, 'render') and callable(response.render):
if hasattr(middleware, 'process_template_response'):
response = middleware.process_template_response(request, response)
# Defer running of process_response until after the template
# has been rendered:
if hasattr(middleware, 'process_response'):
callback = lambda response: middleware.process_response(request, response)
response.add_post_render_callback(callback)
else:
if hasattr(middleware, 'process_response'):
return middleware.process_response(request, response)
return response
return _wrapped_view
return _decorator
return _make_decorator
if ContextDecorator is None:
# ContextDecorator was introduced in Python 3.2
# See https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator
class ContextDecorator(object):
"""
A base class that enables a context manager to also be used as a decorator.
"""
def __call__(self, func):
@wraps(func, assigned=available_attrs(func))
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
class classproperty(object):
def __init__(self, method=None):
self.fget = method
def __get__(self, instance, owner):
return self.fget(owner)
def getter(self, method):
self.fget = method
return self
| bsd-3-clause |
gquirozbogner/contentbox-master | third_party/django/conf/locale/pl/formats.py | 118 | 1148 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| apache-2.0 |
ajrbyers/smw | src/core/logic.py | 2 | 66344 | import os
from builtins import any as string_any
import datetime
import json
import mimetypes
import re
from uuid import uuid4
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.mail import EmailMultiAlternatives
from django.http import StreamingHttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.db.models import Max, Q
from django.shortcuts import get_object_or_404, redirect, render
from django.template import Context
from django.template.loader import get_template
from django.utils import timezone
from django.utils.encoding import smart_text
from docx import Document
from pymarc import Record, Field, record_to_xml
from core import email, models, log, forms
from core.cache import cache_result
from core.decorators import is_reviewer
from core.files import handle_marc21_file
from core.util import (
get_setting,
strip_html_tags,
)
from editorialreview import models as editorialreview_models
from manager import forms as manager_forms
from review import forms as review_forms, models as review_models, \
logic as review_logic
from revisions import models as revisions_models
from submission import logic as submission_logic, models as submission_models
def record_field(tag, indicators, subfields):
return Field(tag=tag, indicators=indicators, subfields=subfields)
def record_control_field(tag, field):
return Field(tag=tag, data=field)
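# Illustrative note (not part of the original module): these thin wrappers
# build pymarc fields, e.g.
#
#     record_field('245', ['1', '0'], ['a', 'Some Title', 'c', 'A. Author'])
#
# creates a title-statement field with indicators '1'/'0' and $a/$c subfields,
# while record_control_field('008', 'eng') creates a control field carrying
# raw data instead of subfields.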
def book_to_mark21_file(book, owner, xml=False):
"""
Number and value explanation:
http://www.loc.gov/marc/bibliographic/bdleader.html
Adding Leader tags.
"""
record = Record() # New record.
leader_list = list(record.leader)
leader_list[5] = 'n' # New
leader_list[6] = 'a' # For manuscript file use 't'
leader_list[7] = 'm' # Monograph
leader_list[9] = 'a'
leader_list[19] = '#'
record.leader = "".join(leader_list)
# Category of material - Text
record.add_field(record_control_field('007', 't'))
# Languages
languages = book.languages.all()
if languages:
for lang in languages:
record.add_field(record_control_field('008', lang.code))
else:
record.add_field(record_control_field('008', 'eng'))
# ISBN - International Standard Book Number.
isbn = models.Identifier.objects.filter(
book=book
).exclude(
identifier='pub_id'
).exclude(
identifier='urn'
).exclude(
identifier='doi'
)
for identifier in isbn:
if book.book_type:
record.add_field(
record_field(
'020',
['#', '#'],
['a', str(identifier.value) + ' ' + book.book_type]
)
)
else:
record.add_field(
record_field(
'020',
['#', '#'],
['a', str(identifier.value)]
)
)
# Source of acquisition.
base_url = get_setting('base_url', 'general')
book_url = 'http://%s/editor/submission/%s/' % (base_url, book.id)
record.add_field(record_field('030', ['#', '#'], ['b', book_url]))
authors = book.author.all() # Main entry - Personal name.
author_names = ''
for author in authors:
author_names = author_names + author.full_name() + ' '
name = author.last_name + ', ' + author.first_name
if author.middle_name:
name = name + ' ' + author.middle_name[:1] + '.'
record.add_field(record_field('100', ['1', '#'], ['a', name]))
title_words = book.title.split(' ') # Title statement.
first_word = title_words[0]
if first_word.lower() == 'the':
record.add_field(
record_field(
'245',
['1', '4'],
['a', book.title, 'c', author_names]
)
)
else:
record.add_field(
record_field(
'245',
['1', '0'],
['a', book.title, 'c', author_names]
)
)
# Publication.
press_name = get_setting('press_name', 'general')
city = get_setting('city', 'general')
publication_info = []
if book.publication_date: # Press' city.
if city:
publication_info.append('a')
publication_info.append(city)
if press_name: # Press' name.
publication_info.append('b')
publication_info.append(press_name)
publication_info.append('c') # Date of Publication.
publication_info.append(str(book.publication_date))
record.add_field(record_field('260', ['#', '#'], publication_info))
if book.pages: # Physical details.
record.add_field(
record_field('300', ['#', '#'], ['a', str(book.pages) + ' pages'])
)
record.add_field( # Content type.
record_field('336', ['#', '#'], ['a', 'text', '2', 'rdacontent'])
)
record.add_field( # Media type.
record_field('337', ['#', '#'], ['a', 'unmediated', '2', 'rdamedia'])
)
record.add_field( # Carrier type.
record_field('338', ['#', '#'], ['a', 'volume', '2', 'rdacarrier'])
)
if languages: # Language note.
for lang in languages:
record.add_field(
record_field('546', ['#', '#'], ['a', lang.display]))
else:
record.add_field(record_field('546', ['#', '#'], ['a', 'In English']))
press_editors = book.press_editors.all()
for editor in press_editors: # Editors.
record.add_field(record_field(
'700',
['1', '#'],
['a', '%s, %s' % (editor.last_name,
editor.first_name), 'e', 'Press editor']))
if book.series: # Series.
record.add_field(
record_field('830', ['#', '0'], ['a', book.series.title]))
if book.series.editor:
record.add_field(record_field(
'700',
['1', '#'],
['a', '%s, %s' % (book.series.editor.last_name,
book.series.editor.first_name),
'e', 'Series editor']
))
title = book.title # Add record to file.
if not xml:
filename = 'book_' + str(book.id) + '_' + re.sub(
r'[^a-zA-Z0-9\n\.]', '', title.lower()
) + '_marc21.dat'
_file = handle_marc21_file(record.as_marc(), filename, book, owner)
else:
filename = 'book_' + str(book.id) + '_' + re.sub(
r'[^a-zA-Z0-9\n\.]', '', title.lower()
) + '_marc21.xml'
content = record_to_xml(record, quiet=False, namespace=False)
_file = handle_marc21_file(content, filename, book, owner)
return _file.pk
# add handle_file ?
def book_to_mark21_file_download_content(book, owner, content, xml=False):
title = book.title
if not content or content.isspace():
content = 'No content found.'
if not xml:
filename = 'book_' + str(book.id) + '_' + re.sub(
r'[^a-zA-Z0-9\n\.]', '', title.lower()
) + '_marc21.dat'
_file = handle_marc21_file(content, filename, book, owner)
else:
filename = 'book_' + str(book.id) + '_' + re.sub(
r'[^a-zA-Z0-9\n\.]', '', title.lower()
) + '_marc21.xml'
_file = handle_marc21_file(content, filename, book, owner)
return _file.pk
def book_to_mark21_file_content(book, owner, xml=False):
""" Number and value explanation:
http://www.loc.gov/marc/bibliographic/bdleader.html
"""
record = Record() # New record.
leader_list = list(record.leader) # Adding Leader tags.
leader_list[5] = 'n' # New
leader_list[6] = 'a' # For manuscript file use 't'
leader_list[7] = 'm' # Monograph
leader_list[9] = 'a'
leader_list[19] = '#'
record.leader = "".join(leader_list)
# Category of material - Text
record.add_field(record_control_field('007', 't'))
languages = book.languages.all() # Languages.
if languages:
for lang in languages:
record.add_field(record_control_field('008', lang.code))
else:
record.add_field(record_control_field('008', 'eng'))
isbn = models.Identifier.objects.filter(
# International Standard Book Number
book=book
).exclude(
identifier='pub_id'
).exclude(
identifier='urn'
).exclude(
identifier='doi'
)
for identifier in isbn:
if book.book_type:
record.add_field(record_field(
'020',
['#', '#'],
['a', str(identifier.value) + ' ' + book.book_type]
))
else:
record.add_field(
record_field('020', ['#', '#'], ['a', str(identifier.value)]))
# Source of acquisition.
base_url = get_setting('base_url', 'general', default='localhost:8000')
book_url = 'http://%s/editor/submission/%s/' % (base_url, book.id)
record.add_field(record_field('030', ['#', '#'], ['b', book_url]))
authors = book.author.all() # Main entry - Personal name.
author_names = ''
for author in authors:
author_names = author_names + author.full_name() + ' '
name = author.last_name + ', ' + author.first_name
if author.middle_name:
name = name + ' ' + author.middle_name[:1] + '.'
record.add_field(record_field('100', ['1', '#'], ['a', name]))
title_words = book.title.split(' ') # Title statement.
first_word = title_words[0]
if first_word.lower() == 'the':
record.add_field(record_field(
'245',
['1', '4'],
['a', book.title, 'c', author_names]
))
else:
record.add_field(record_field(
'245',
['1', '0'],
['a', book.title, 'c', author_names]
))
# Publication.
press_name = get_setting('press_name', 'general')
city = get_setting('city', 'general')
publication_info = []
if book.publication_date:
if city: # Press' city.
publication_info.append('a')
publication_info.append(str(city))
if press_name: # Press' name.
publication_info.append('b')
publication_info.append(str(press_name))
publication_info.append('c') # Date of Publication.
publication_info.append(str(book.publication_date))
record.add_field(record_field('260', ['#', '#'], publication_info))
if book.pages: # Physical details.
record.add_field(record_field(
'300',
['#', '#'],
['a', str(book.pages) + ' pages']
))
record.add_field(record_field( # Content type.
'336',
['#', '#'],
['a', 'text', '2', 'rdacontent']
))
record.add_field(record_field( # Media type.
'337',
['#', '#'],
['a', 'unmediated', '2', 'rdamedia']
))
record.add_field(record_field( # Carrier type.
'338',
['#', '#'],
['a', 'volume', '2', 'rdacarrier']
))
if languages: # Language note.
for lang in languages:
record.add_field(
record_field('546', ['#', '#'], ['a', lang.display])
)
else:
record.add_field(record_field('546', ['#', '#'], ['a', 'In English']))
press_editors = book.press_editors.all()
for editor in press_editors: # Editors.
record.add_field(record_field(
'700',
['1', '#'],
[
'a',
'%s, %s' % (
editor.last_name,
editor.first_name
),
'e',
'Press editor'
]
))
if book.series: # Series.
record.add_field(
record_field('830', ['#', '0'], ['a', book.series.title]))
if book.series.editor:
record.add_field(record_field(
'700',
['1', '#'],
[
'a', '%s, %s' % (
book.series.editor.last_name,
book.series.editor.first_name
),
'e',
'Series editor'
]
))
title = book.title # Add record to file.
if not xml:
filename = 'book_' + str(book.id) + '_' + re.sub(
r'[^a-zA-Z0-9\n\.]', '', title.lower()
) + '_marc21.dat'
handle_marc21_file(record.as_marc(), filename, book, owner)
content = record.as_marc()
else:
filename = 'book_' + str(book.id) + '_' + re.sub(
r'[^a-zA-Z0-9\n\.]', '', title.lower()
) + '_marc21.xml'
content = record_to_xml(record, quiet=False, namespace=False)
handle_marc21_file(content, filename, book, owner)
return content or None
def get_author_emails(submission_id, term):
submission = get_object_or_404(models.Book, pk=submission_id)
authors = submission.author.all()
results = []
for author in authors:
name = author.full_name()
author_json = {
'id': author.id,
'label': author.full_name(),
'value': author.author_email
}
if term:
if term.lower() in name.lower():
results.append(author_json)
return results
def get_editor_emails(submission_id, term):
submission = get_object_or_404(models.Book, pk=submission_id)
editors = get_editors(submission)
results = []
for editor in editors:
if hasattr(editor, 'full_name'):
name = editor.full_name()
else:
name = '{first_name} {last_name}'.format(
first_name=editor.first_name,
last_name=editor.last_name
)
editor_json = {'id': editor.id, 'label': name}
if hasattr(editor, 'author_email'):
editor_json['value'] = editor.author_email
else:
editor_json['value'] = editor.email
if term:
if term.lower() in name.lower():
results.append(editor_json)
return results
def get_all_user_emails(term):
users = User.objects.all()
results = []
for user in users:
if user.profile and hasattr(user.profile, 'full_name'):
name = user.profile.full_name()
else:
name = '{first_name} {last_name}'.format(
first_name=user.first_name,
last_name=user.last_name
)
user_json = {'id': user.id, 'label': name, 'value': user.email}
if term:
if term.lower() in name.lower():
results.append(user_json)
return results
def get_onetasker_emails(submission_id, term):
submission = get_object_or_404(models.Book, pk=submission_id)
onetaskers = submission.get_onetaskers()
results = []
for user in onetaskers:
user_json = {}
name = '{first_name} {last_name}'.format(
first_name=user.first_name,
last_name=user.last_name
)
user_json['id'] = user.id
user_json['label'] = user.profile.full_name()
user_json['value'] = user.email
if (
not string_any(user_json['value'] for _ in results) and
term.lower() in name.lower()
):
results.append(user_json)
return results
def get_proposal_emails(proposal_id, term):
proposal = get_object_or_404(submission_models.Proposal, pk=proposal_id)
results = []
user = proposal.owner
name = '{first_name} {last_name}'.format(
first_name=user.first_name,
last_name=user.last_name
)
user_json = {
'id': user.id,
'label': user.profile.full_name(),
'value': user.email,
}
if (
not string_any(user_json['value'] for _ in results) and
term.lower() in name.lower()
):
results.append(user_json)
if proposal.requestor:
user = proposal.requestor
name = '{first_name} {last_name}'.format(
first_name=user.first_name,
last_name=user.last_name
)
user_json = {
'id': user.id,
'label': user.profile.full_name(),
'value': user.email,
}
if (
not string_any(user_json['value'] for result in results) and
term.lower() in name.lower()
):
results.append(user_json)
return results
def get_editors(book):
press_editors = book.press_editors.all()
book_editors = book.book_editors.all()
if book.series:
series_editor = book.series.editor
if series_editor:
series_editor_list = [series_editor]
press_editor_list = [
editor for editor in press_editors
if not editor == series_editor_list[0]
]
else:
series_editor_list = []
press_editor_list = [editor for editor in press_editors]
else:
series_editor_list = []
press_editor_list = [editor for editor in press_editors]
if book_editors:
book_editor_list = [
editor for editor in book_editors
if editor not in press_editor_list
]
else:
book_editor_list = []
return press_editor_list + series_editor_list + book_editor_list
def clean_email_list(addresses):
list_of_email_addresses = []
for address in addresses:
if '@' in address:
if address.replace(" ", "") not in list_of_email_addresses:
list_of_email_addresses.append(address.replace(" ", ""))
if len(list_of_email_addresses) < 1:
return None
return list_of_email_addresses
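# Illustrative behaviour (not part of the original module):
#
#     clean_email_list(['a@example.com', ' a@example.com ', 'not-an-email'])
#
# returns ['a@example.com']: whitespace is stripped, duplicates and values
# lacking an '@' are dropped, and an input with no valid addresses yields None.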
def send_email(
subject,
context,
from_email,
to,
html_template,
text_template=None,
):
plaintext = get_template(text_template)
htmly = get_template(html_template)
con = Context(context)
text_content = plaintext.render(con)
html_content = htmly.render(con)
msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
msg.attach_alternative(html_content, "text/html")
msg.send()
@cache_result(300)
def press_settings():
_dict = {}
for group in models.SettingGroup.objects.all():
_dict[group.name] = {
setting.name: setting.value for setting in
models.Setting.objects.filter(group=group)
}
return _dict
def task_count(request):
"""
Counts the number of incomplete tasks assigned to the user of a request
:param request: the request containing the user object used in the query
    :return: the number of incomplete tasks assigned to the request's user
"""
if request.user.is_authenticated:
return models.Task.objects.filter(
assignee=request.user,
completed__isnull=True,
).count()
else:
return 0
def review_assignment_count(request):
"""
Counts the number of active reviews assigned to the user of a request
:param request: the request containing the user object used in the query
:return: the number of active reviews assigned to the request's user
"""
if request.user.is_authenticated:
return models.ReviewAssignment.objects.filter(
user=request.user,
completed__isnull=True,
declined__isnull=True,
withdrawn=False
).count() + submission_models.ProposalReview.objects.filter(
user=request.user,
completed__isnull=True,
declined__isnull=True,
withdrawn=False
).count() + models.ReviewAssignment.objects.filter(
user=request.user,
completed__isnull=False,
declined__isnull=True,
reopened=True
).count() + editorialreview_models.EditorialReview.objects.filter(
user=request.user,
completed__isnull=True,
withdrawn=False
).count()
else:
return 0
def author_tasks(user):
"""
Returns a list of revision, typeset and copyedit tasks for a given user
:param user: the user for whom to get tasks
:return: a list of incomplete assigned to the user
"""
task_list = []
if user.is_authenticated:
base_url = get_setting('base_url', 'general')
revision_tasks = revisions_models.Revision.objects.filter(
book__owner=user,
requested__isnull=False,
completed__isnull=True
).select_related(
'book'
)
copyedit_tasks = models.CopyeditAssignment.objects.filter(
book__owner=user,
author_invited__isnull=False,
author_completed__isnull=True
).select_related(
'book'
)
typeset_tasks = models.TypesetAssignment.objects.filter(
book__owner=user,
author_invited__isnull=False,
author_completed__isnull=True
).select_related(
'book'
)
for revision in revision_tasks:
task_list.append(
{
'task': 'Revisions Requested',
'title': revision.book.title,
'url': 'http://%s/revisions/%s' % (base_url, revision.id)
}
)
for copyedit in copyedit_tasks:
task_list.append(
{
'task': 'Copyedit Review',
'title': copyedit.book.title,
'url': 'http://%s/copyedit/book/%s/edit/%s/author/' % (
base_url,
copyedit.book.id,
copyedit.id
)
}
)
for typeset in typeset_tasks:
task_list.append(
{
'task': 'Typesetting Review',
'title': typeset.book.title,
'url': 'http://%s/typeset/book/%s/typeset/%s/author/' % (
base_url,
typeset.book.id,
typeset.id
)
}
)
return task_list
def typesetter_tasks(user):
active = models.TypesetAssignment.objects.filter(
(
Q(requested__isnull=False) &
Q(completed__isnull=True)
) | (
Q(typesetter_invited__isnull=False) &
Q(typesetter_completed__isnull=True)
),
typesetter=user,
).select_related(
'book',
).exclude(
declined__isnull=False,
)
completed = models.TypesetAssignment.objects.filter(
(
Q(completed__isnull=False) &
Q(typesetter_completed__isnull=True)
) | (
Q(completed__isnull=False) &
Q(typesetter_completed__isnull=False)
),
typesetter=user,
).select_related(
'book',
).order_by(
'-completed',
)[:5]
return {'active': active, 'completed': completed}
def copyeditor_tasks(user):
active = models.CopyeditAssignment.objects.filter(
copyeditor=user,
completed__isnull=True,
).exclude(
declined__isnull=False,
).select_related(
'book',
)
completed = models.CopyeditAssignment.objects.filter(
copyeditor=user,
completed__isnull=False
).select_related(
'book'
).order_by(
'-completed'
)[:5]
return {'active': active, 'completed': completed}
def indexer_tasks(user):
active = models.IndexAssignment.objects.filter(
indexer=user,
completed__isnull=True
).exclude(
declined__isnull=False
).select_related(
'book'
)
completed = models.IndexAssignment.objects.filter(
indexer=user,
completed__isnull=False
).select_related(
'book'
).order_by(
'-completed'
)[:5]
return {'active': active, 'completed': completed}
def onetasker_tasks(user):
active = []
completed = []
active_copyeditor_tasks = copyeditor_tasks(user).get('active')
completed_copyeditor_tasks = copyeditor_tasks(user).get('completed')
active_typesetter_tasks = typesetter_tasks(user).get('active')
completed_typesetter_tasks = typesetter_tasks(user).get('completed')
active_indexer_tasks = indexer_tasks(user).get('active')
completed_indexer_tasks = indexer_tasks(user).get('completed')
for assignment in active_copyeditor_tasks:
active.append({'assignment': assignment, 'type': 'copyedit', })
for assignment in active_typesetter_tasks:
active.append({'assignment': assignment, 'type': 'typesetting'})
for assignment in active_indexer_tasks:
active.append({'assignment': assignment, 'type': 'indexing'})
for assignment in completed_copyeditor_tasks:
completed.append({'assignment': assignment, 'type': 'copyedit'})
for assignment in completed_typesetter_tasks:
completed.append({'assignment': assignment, 'type': 'typesetting'})
for assignment in completed_indexer_tasks:
completed.append({'assignment': assignment, 'type': 'indexing'})
return {'completed': completed, 'active': active}
def create_new_review_round(book):
latest_round = models.ReviewRound.objects.filter(
book=book
).aggregate(
max=Max('round_number')
)
next_round = (
latest_round.get('max') + 1 if
latest_round.get('max') and
latest_round.get('max') > 0
else 1
)
return models.ReviewRound.objects.create(book=book, round_number=next_round)
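# Illustrative behaviour (not part of the original module): if a book already
# has review rounds numbered 1 and 2, the aggregate above returns {'max': 2}
# and the new round is created with round_number=3; with no prior rounds the
# aggregate yields {'max': None} and numbering starts at 1.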
def build_time_line_editing_copyedit(copyedit):
timeline = []
overdue = False
if copyedit.accepted:
if copyedit.completed and copyedit.completed > copyedit.due:
overdue = True
timeline.append({
'stage': 'Requested',
'date': copyedit.requested,
'overdue': overdue,
})
timeline.append({
'stage': 'Accepted',
'date': copyedit.accepted,
'overdue': overdue,
})
if copyedit.completed:
if overdue:
timeline.append({
'stage': 'Completed',
'date': copyedit.completed,
'overdue': overdue,
})
else:
timeline.append({
'stage': 'Completed',
'date': copyedit.completed,
'overdue': overdue,
})
else:
timeline.append({
'stage': 'Due',
'date': copyedit.due,
'overdue': overdue,
})
timeline.append({
'stage': 'Editor Review',
'date': copyedit.editor_review,
'overdue': overdue,
})
timeline.append({
'stage': 'Author Invited',
'date': copyedit.author_invited,
'overdue': overdue,
})
timeline.append({
'stage': 'Author completed',
'date': copyedit.author_completed,
'overdue': overdue,
})
else:
timeline.append({
'stage': 'Requested',
'date': copyedit.requested,
'overdue': overdue,
})
timeline.append({
'stage': 'Declined',
'date': copyedit.declined,
'declined': True,
})
clean_timeline = []
for time in timeline:
if time['date']:
if isinstance(time['date'], datetime.datetime):
time['date'] = time['date'].date()
clean_timeline.append(time)
return sorted(clean_timeline, key=lambda k: k['date'])
def build_time_line_editing_indexer(index):
timeline = []
overdue = False
if index.accepted:
if index.completed and index.completed > index.due:
overdue = True
timeline.append({
'stage': 'Requested',
'date': index.requested,
'overdue': overdue,
})
timeline.append({
'stage': 'Accepted',
'date': index.accepted,
'overdue': overdue,
})
if index.completed:
if overdue:
timeline.append({
'stage': 'Due',
'date': index.due,
'overdue': overdue,
})
timeline.append({
'stage': 'Completed',
'date': index.completed,
'overdue': overdue,
})
else:
timeline.append({
'stage': 'Completed',
'date': index.completed,
'overdue': overdue,
})
timeline.append({
'stage': 'Due',
'date': index.due,
'overdue': overdue,
})
else:
timeline.append({
'stage': 'Due',
'date': index.due,
'overdue': overdue,
})
else:
timeline.append({
'stage': 'Declined',
'date': index.declined,
'declined': True,
})
timeline.append({'stage': 'Due', 'date': index.due, 'overdue': overdue})
clean_timeline = []
for time in timeline:
if time['date']:
if isinstance(time['date'], datetime.datetime):
time['date'] = time['date'].date()
clean_timeline.append(time)
return sorted(clean_timeline, key=lambda k: k['date'])
def build_time_line(book):
timeline = []
if book.stage:
timeline.append({
'stage': 'Declined',
'date': book.stage.declined,
})
timeline.append({
'stage': 'Publication',
'date': book.stage.publication,
})
timeline.append({
'stage': 'Production',
'date': book.stage.production,
})
timeline.append({
'stage': 'Typesetting',
'date': book.stage.typesetting,
})
timeline.append({
'stage': 'Indexing',
'date': book.stage.indexing,
})
timeline.append({
'stage': 'Copyediting',
'date': book.stage.copyediting,
})
timeline.append({
'stage': 'Editing',
'date': book.stage.editing,
})
timeline.append({
'stage': 'External Review',
'date': book.stage.external_review,
})
timeline.append({
'stage': 'Internal Review',
'date': book.stage.internal_review,
})
timeline.append({
'stage': 'Review',
'date': book.stage.review,
})
if book.proposal:
timeline.append({
'stage': 'Proposal Submitted',
'date': book.proposal.date_submitted,
})
timeline.append({
'stage': 'Proposal Review Started',
'date': book.proposal.date_review_started,
})
timeline.append({
'stage': 'Proposal Accepted',
'date': book.proposal.date_accepted,
})
timeline.append({
'stage': 'Book Submitted',
'date': book.stage.submission,
})
timeline.append({
'stage': 'Proposal',
'date': book.stage.proposal,
})
clean_timeline = []
for time in timeline:
if time['date']:
if isinstance(time['date'], datetime.datetime):
time['date'] = time['date'].date()
clean_timeline.append(time)
return sorted(clean_timeline, key=lambda k: k['date'], reverse=True)
def send_proposal_review_request(
request,
proposal,
review_assignment,
email_text,
attachment=None,
access_key=None,
):
from_email = request.user.email or get_setting('from_address', 'email')
base_url = get_setting('base_url', 'general')
press_name = get_setting('press_name', 'general')
if access_key:
review_url = "http://{0}{1}".format(
base_url,
reverse(
'view_proposal_review_decision_access_key',
kwargs={
'proposal_id': proposal.id,
'assignment_id': review_assignment.id,
'access_key': access_key,
}
)
)
else:
review_url = "http://{0}{1}".format(
base_url,
reverse(
'view_proposal_review_decision',
kwargs={
'proposal_id': proposal.id,
'assignment_id': review_assignment.id,
}
)
)
if request:
from_email = "%s <%s>" % (
request.user.profile.full_name(),
from_email,
)
context = {
'review': review_assignment,
'review_url': review_url,
'proposal': proposal,
'press_name': press_name,
}
email.send_email(
get_setting(
'proposal_review_request_subject',
'email_subject',
'Proposal Review Request'
),
context,
from_email,
review_assignment.user.email,
email_text,
proposal=proposal,
attachment=attachment,
request=request,
kind='proposal_review',
access_key=access_key,
)
def send_proposal_review_reopen_request(
request,
proposal,
review_assignment,
email_text,
attachment=None,
):
from_email = request.user.email or get_setting('from_address', 'email')
base_url = get_setting('base_url', 'general')
press_name = get_setting('press_name', 'general')
if request:
from_email = "%s <%s>" % (
request.user.profile.full_name(),
from_email,
)
review_url = "http://{0}{1}".format(
base_url,
reverse(
'view_proposal_review_decision',
kwargs={
'proposal_id': proposal.id,
'assignment_id': review_assignment.id
}
))
context = {
'review': review_assignment,
'review_url': review_url,
'proposal': proposal,
'press_name': press_name,
}
email.send_email(
get_setting(
'proposal_review_reopen_subject',
'email_subject',
'Proposal Review Assignment has reopened'
),
context,
from_email,
review_assignment.user.email,
email_text,
proposal=proposal,
attachment=attachment,
request=request,
kind='proposal_review',
)
def order_data(data, relations):
ordered_data = []
for relation in relations:
if relation.element.name in data:
ordered_data.append(
[relation.element.name, data[relation.element.name]]
)
return ordered_data
def decode_json(json_data):
return json.loads(json_data)
def encode_data(data):
return smart_text(json.dumps(data))
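# Illustrative round trip (not part of the original module): form data is stored
# as a JSON blob, so the two helpers above invert each other.
#     payload = encode_data({'Title': ['My Book', 'text']})
#     decode_json(payload)
#     # -> {'Title': ['My Book', 'text']}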
def close_active_reviews(proposal):
for review in proposal.review_assignments.all():
review.completed = timezone.now()
review.save()
def create_submission_from_proposal(proposal, proposal_type):
book = models.Book(
title=proposal.title,
subtitle=proposal.subtitle,
owner=proposal.owner,
book_type=proposal_type,
submission_stage=1,
)
book.save()
if book.book_type == 'monograph':
submission_logic.copy_author_to_submission(proposal.owner, book)
elif book.book_type == 'edited_volume':
submission_logic.copy_editor_to_submission(proposal.owner, book)
book.save()
return book
def handle_typeset_assignment(
book,
typesetter,
files,
due_date,
email_text,
requestor,
attachment,
):
new_typesetter = models.TypesetAssignment(
book=book,
typesetter=typesetter,
requestor=requestor,
due=due_date,
)
new_typesetter.save()
for _file in files:
new_typesetter.files.add(_file)
new_typesetter.save()
send_invite_typesetter(
book,
new_typesetter,
email_text,
requestor,
attachment,
)
log.add_log_entry(
book=book,
user=requestor,
        kind='typeset',
message='Typesetter %s %s assigned. Due %s' % (
typesetter.first_name,
typesetter.last_name, due_date
),
short_name='Typeset Assignment',
)
def send_decision_ack(
request,
book,
decision,
email_text,
url=None,
attachment=None
):
""" Email Handlers - TODO: move to email.py? """
from_email = request.user.email or get_setting('from_address', 'email')
    if decision != 'decline':
decision_full = "Move to " + decision
else:
decision_full = 'Reject Submission'
authors = book.author.all()
for author in authors:
context = {
'submission': book,
'author': author,
'decision': decision,
'link_to_page': url,
}
kind = "submission"
subject = get_setting(
'submission_decision_update_subject',
'email_subject',
'Submission decision update: %s' % decision_full
)
if attachment:
email.send_email(
subject,
context,
from_email,
author.author_email,
email_text,
kind=kind,
book=book,
attachment=attachment,
)
else:
email.send_email(
subject,
context,
from_email,
author.author_email,
email_text,
kind=kind,
book=book,
)
def send_editorial_decision_ack(
request,
review_assignment,
contact,
decision,
email_text,
url=None,
attachment=None,
):
from_email = request.user.email or get_setting('from_address', 'email')
publishing_committee = get_setting('publishing_committee', 'general')
decision_full = decision
if contact == 'editorial-board':
editors = review_assignment.editorial_board.all()
for editor in editors:
context = {
'submission': review_assignment.book,
'editor': editor.profile.full_name(),
'decision': decision,
'link_to_page': url,
}
subject = get_setting(
'submission_decision_update_subject',
'email_subject',
'Submission decision update: %s' % decision_full,
)
email.send_email(
subject,
context,
from_email,
editor.email,
email_text,
book=review_assignment.book,
attachment=attachment,
kind='submission',
)
elif contact == 'author':
authors = review_assignment.book.author.all()
for author in authors:
context = {
'submission': review_assignment.book,
'name': author.full_name(),
'decision': decision,
'link_to_page': url,
}
subject = get_setting(
'submission_decision_update_subject',
'email_subject',
'Submission decision update: %s' % decision_full,
)
email.send_email(
subject,
context,
from_email,
author.author_email,
email_text,
book=review_assignment.book,
attachment=attachment,
kind='submission',
)
elif contact == 'publishing-committee':
emails = clean_email_list(publishing_committee.split(';'))
context = {
'submission': review_assignment.book,
'name': 'Publishing Committee',
'decision': decision,
'link_to_page': url,
}
for current_email in emails:
subject = get_setting(
'submission_decision_update_subject',
'email_subject',
'Submission decision update: %s' % decision_full,
)
email.send_email(
subject,
context,
from_email,
current_email,
email_text,
book=review_assignment.book,
attachment=attachment,
kind='submission',
)
def send_production_editor_ack(
request,
book,
editor,
email_text,
attachment=None
):
""" Email Handlers - TODO: move to email.py? """
from_email = request.user.email or get_setting('from_address', 'email')
context = {'submission': book, 'editor': editor}
subject = get_setting(
'production_editor_subject',
'email_subject',
'Production Editor for {0}'.format(book.full_title),
)
email.send_email(
subject,
context,
from_email,
editor.email,
email_text,
book=book,
attachment=attachment,
kind='production',
)
def send_review_request(
request,
book,
review_assignment,
email_text,
sender,
attachment=None,
access_key=None,
):
from_email = request.user.email or get_setting('from_address', 'email')
base_url = get_setting('base_url', 'general')
press_name = get_setting('press_name', 'general')
if access_key:
decision_url = (
'http://{base_url}/review/{review_type}/{book_id}/assignment/'
'{review_assignment_id}/access_key/{access_key}/decision/'.format(
base_url=base_url,
review_type=review_assignment.review_type,
book_id=book.id,
review_assignment_id=review_assignment.id,
access_key=access_key,
)
)
else:
decision_url = (
'http://{base_url}/review/{review_type}/{book_id}/'
'assignment/{review_assignment_id}/decision/'.format(
base_url=base_url,
review_type=review_assignment.review_type,
book_id=book.id,
review_assignment_id=review_assignment.id,
)
)
context = {
'book': book,
'review': review_assignment,
'decision_url': decision_url,
'sender': sender,
'base_url': base_url,
'press_name': press_name,
}
email.send_email(
subject=get_setting(
'review_request_subject',
'email_subject',
'Review Request',
),
context=context,
from_email=from_email,
to=review_assignment.user.email,
html_template=email_text,
book=book,
attachment=attachment,
kind='review',
)
def send_proposal_book_editor(
request,
proposal,
email_text,
sender,
editor_email,
):
from_email = request.user.email or get_setting('from_address', 'email')
if request:
from_email = "%s <%s>" % (
request.user.profile.full_name(),
from_email,
)
context = {'proposal': proposal, 'sender': sender}
subject = get_setting(
'proposal_book_editors_subject',
'email_subject',
'[abp] Proposal Book Editors: Update',
)
email.send_email(
subject,
context,
from_email,
editor_email,
email_text,
proposal=proposal,
request=request,
kind='proposal',
)
def send_proposal_decline(request, proposal, email_text, sender):
from_email = request.user.email or get_setting('from_address', 'email')
if request:
from_email = "%s <%s>" % (
request.user.profile.full_name(),
from_email,
)
context = {'proposal': proposal, 'sender': sender}
subject = get_setting(
'proposal_declined_subject',
'email_subject',
'[abp] Proposal Declined',
)
email.send_email(
subject,
context,
from_email,
proposal.owner.email,
email_text,
proposal=proposal,
request=request,
kind='proposal',
)
def send_proposal_update(
request,
proposal,
email_text,
sender,
receiver
):
from_email = request.user.email or get_setting('from_address', 'email')
context = {'proposal': proposal, 'sender': sender, 'receiver': receiver}
subject = get_setting(
'proposal_update_subject',
'email_subject',
'[abp] Proposal Update',
)
email.send_email(
subject,
context,
from_email,
proposal.owner.email,
email_text,
proposal=proposal,
kind='proposal',
)
def send_proposal_submission_ack(request, proposal, email_text, owner):
from_email = request.user.email or get_setting('from_address', 'email')
press_name = get_setting('press_name', 'general')
principal_contact_name = get_setting('primary_contact_name', 'general')
context = {
'proposal': proposal,
'owner': owner,
'press_name': press_name,
'principal_contact_name': principal_contact_name,
}
subject = get_setting(
'proposal_submission_ack_subject',
'email_subject',
'[abp] Proposal Submission Acknowledgement',
)
email.send_email(
subject,
context,
from_email,
proposal.owner.email,
email_text,
proposal=proposal,
kind='proposal',
)
def send_proposal_change_owner_ack(request, proposal, email_text, owner):
from_email = request.user.email or get_setting('from_address', 'email')
press_name = get_setting('press_name', 'general')
principal_contact_name = get_setting('primary_contact_name', 'general')
context = {
'proposal': proposal,
'receiver': owner,
'sender': request.user,
'base_url': get_setting('base_url', 'general'),
'press_name': press_name,
'principal_contact_name': principal_contact_name,
}
subject = get_setting(
'change_principal_contact_proposal_subject',
'email_subject',
'[abp] Proposal Owner Change',
)
email.send_email(
subject,
context,
from_email,
proposal.owner.email,
email_text,
proposal=proposal,
kind='proposal',
request=request,
)
def send_task_decline(assignment, _type, email_text, sender, request):
if request.user.is_authenticated:
from_email = request.user.email
from_email = "%s <%s>" % (
request.user.profile.full_name(),
from_email,
)
else:
from_email = get_setting('from_address', 'email')
context = {'assignment': assignment, 'sender': sender}
    subject = get_setting(
        'assignment_declined_subject',
        'email_subject',
        '[abp] %s Assignment [id<%s>] Declined',
    ) % (
        _type.title(),
        assignment.id,
    )
email.send_email(
subject,
context,
from_email,
assignment.requestor.email,
email_text,
request=request,
kind='workflow',
)
def send_proposal_accept(
request,
proposal,
email_text,
submission, sender,
attachment=None,
):
from_email = request.user.email or get_setting('from_address', 'email')
if request:
from_email = "%s <%s>" % (
request.user.profile.full_name(),
from_email,
)
context = {
'base_url': get_setting('base_url', 'general'),
'proposal': proposal,
'submission': submission,
'sender': sender,
}
subject = get_setting(
'proposal_accepted_subject',
'email_subject',
'[abp] Proposal Accepted',
)
email.send_email(
subject,
context,
from_email,
proposal.owner.email,
email_text,
proposal=proposal,
book=submission,
attachment=attachment,
request=request,
kind='proposal',
)
def send_proposal_revisions(request, proposal, email_text, sender):
from_email = request.user.email or get_setting('from_address', 'email')
press_name = get_setting('press_name', 'general')
if request:
from_email = "%s <%s>" % (
request.user.profile.full_name(),
from_email,
)
context = {
'base_url': get_setting('base_url', 'general'),
'proposal': proposal,
'sender': sender,
'press_name': press_name,
}
subject = get_setting(
'proposal_revision_required_subject',
'email_subject',
'[abp] Proposal Revisions Required',
)
email.send_email(
subject,
context,
from_email,
proposal.owner.email,
email_text,
proposal=proposal,
request=request,
kind='proposal',
)
def send_proposal_contract_author_sign_off(proposal, email_text, sender):
from_email = sender.email or get_setting('from_address', 'email')
context = {
'base_url': get_setting('base_url', 'general'),
'proposal': proposal,
'sender': sender,
}
    email.send_email(
        get_setting(
            'book_contract_uploaded_subject',
            'email_subject',
            'Book Contract Uploaded',
        ),
        context,
        from_email,
        proposal.owner.email,
        email_text,
        proposal=proposal,
        kind='proposal',
    )
def send_invite_typesetter(book, typeset, email_text, sender, attachment):
from_email = sender.email or get_setting('from_address', 'email')
context = {
'base_url': get_setting('base_url', 'general'),
'submission': typeset.book,
'typeset': typeset,
'sender': sender,
}
subject = get_setting('typesetting_subject', 'email_subject', 'Typesetting')
email.send_email(
subject,
context,
from_email,
typeset.typesetter.email,
email_text,
book=book,
attachment=attachment,
kind='typeset',
)
def send_new_user_ack(email_text, new_user, profile):
from_email = get_setting('from_address', 'email')
press_name = get_setting('press_name', 'general')
context = {
'base_url': get_setting('base_url', 'general'),
'user': new_user,
'profile': profile,
'press_name': press_name,
}
subject = get_setting(
'registration_confirmation_subject',
'email_subject',
'Registration Confirmation',
)
email.send_email(
subject,
context,
from_email,
new_user.email,
email_text,
kind='general',
)
def get_active_proposal_form():
""" Return the current active proposal form.
Looks for the first form marked as active and
not in edit (there should only be one). If not,
returns the first form it can find not in edit.
"""
active_form = models.ProposalForm.objects.filter(
active=True,
in_edit=False
).first()
if not active_form:
active_form = models.ProposalForm.objects.filter(in_edit=False).first()
return active_form
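# Example usage (illustrative, not part of the original module):
#     form = get_active_proposal_form()
#     if form is None:
#         # every ProposalForm is currently flagged as in_edit
#         ...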
def get_file_mimetype(file_path):
"""Returns a guessed mimetype for a given file path.
Args:
file_path (str): the path to the storage location of the file.
Returns:
str
"""
mimetype_guess = mimetypes.guess_type(file_path)
mimetype = mimetype_guess[0]
if not mimetype:
mimetype = 'unknown'
return mimetype
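# Example usage (illustrative): the guess is based on the file extension alone,
# so the file does not need to exist.
#     get_file_mimetype('contract.pdf')   # -> 'application/pdf'
#     get_file_mimetype('contract.blob')  # -> 'unknown'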
def get_list_of_editors(proposal):
    previous_editors = list(proposal.book_editors.all())
    all_book_editors = User.objects.filter(profile__roles__slug='book-editor')
    list_of_editors = []
    for editor in all_book_editors:
        list_of_editors.append({
            'editor': editor,
            'already_added': editor in previous_editors,
        })
    return list_of_editors
@is_reviewer
def view_completed_proposal_review(request, proposal_id, assignment_id):
_proposal = get_object_or_404(submission_models.Proposal, pk=proposal_id)
proposal_form = manager_forms.GeneratedForm(
form=models.ProposalForm.objects.get(pk=_proposal.form.id)
)
relationships = models.ProposalFormElementsRelationship.objects.filter(
form=_proposal.form
)
data = json.loads(_proposal.data)
initial_data = {}
for k, v in data.items():
initial_data[k] = v[0]
proposal_form.initial = initial_data
review_assignment = get_object_or_404(
submission_models.ProposalReview,
pk=assignment_id,
withdrawn=False,
)
result = review_assignment.results
if review_assignment.review_form:
form = review_forms.GeneratedForm(form=review_assignment.review_form)
else:
review_assignment.review_form = _proposal.review_form
review_assignment.save()
form = review_forms.GeneratedForm(form=_proposal.review_form)
ci_required = get_setting('ci_required', 'general')
recommendation_form = forms.RecommendationForm(
ci_required=ci_required
)
if result:
relations = review_models.FormElementsRelationship.objects.filter(
form=result.form
)
data_ordered = order_data(
decode_json(result.data),
relations
)
else:
data_ordered = None
if not request.POST and request.GET.get('download') == 'proposal':
path = create_proposal_form(_proposal)
return serve_proposal_file(request, path)
elif not request.POST and request.GET.get('download') == 'docx':
path = create_completed_proposal_review_form(
_proposal,
review_assignment.pk
)
return serve_proposal_file(request, path)
elif request.POST:
form = review_forms.GeneratedForm(
request.POST,
request.FILES,
form=review_assignment.review_form
)
recommendation_form = forms.RecommendationForm(
request.POST,
ci_required=ci_required
)
if form.is_valid() and recommendation_form.is_valid():
save_dict = {}
file_fields = review_models.FormElementsRelationship.objects.filter(
form=review_assignment.review_form,
element__field_type='upload'
)
data_fields = review_models.FormElementsRelationship.objects.filter(
~Q(element__field_type='upload'),
form=review_assignment.review_form
)
for field in file_fields:
if field.element.name in request.FILES:
save_dict[field.element.name] = [
review_logic.handle_review_file(
request.FILES[field.element.name],
'proposal',
review_assignment,
'reviewer'
)
]
for field in data_fields:
if field.element.name in request.POST:
save_dict[field.element.name] = [
request.POST.get(field.element.name),
'text'
]
json_data = smart_text(json.dumps(save_dict))
form_results = review_models.FormResult(
form=review_assignment.review_form,
data=json_data
)
form_results.save()
if request.FILES.get('review_file_upload'):
review_logic.handle_review_file(
request.FILES.get('review_file_upload'), 'proposal',
review_assignment, 'reviewer')
review_assignment.completed = timezone.now()
if not review_assignment.accepted:
review_assignment.accepted = timezone.now()
review_assignment.recommendation = request.POST.get(
'recommendation'
)
review_assignment.competing_interests = request.POST.get(
'competing_interests'
)
review_assignment.results = form_results
review_assignment.save()
return redirect(reverse('user_dashboard'))
template = 'core/proposals/completed_review_assignment.html'
context = {
'proposal': _proposal,
'proposal_form': proposal_form,
'review_assignment': review_assignment,
'data_ordered': data_ordered,
        'data_ordered_size': len(data_ordered) if data_ordered else 0,
'result': result,
'form': form,
'recommendation_form': recommendation_form,
'active': 'proposal_review',
'relationships': relationships,
'instructions': get_setting(
'instructions_for_task_proposal',
'general'
),
'data': data,
}
return render(request, template, context)
def create_completed_proposal_review_form(proposal, review_id):
document = Document()
if proposal.subtitle:
document.add_heading("%s: %s" % (proposal.title, proposal.subtitle), 0)
else:
document.add_heading(proposal.title, 0)
review_assignment = get_object_or_404(
submission_models.ProposalReview,
pk=review_id,
)
if review_assignment.review_form:
relations = review_models.FormElementsRelationship.objects.filter(
form=review_assignment.review_form
).order_by(
'order'
)
else:
review_assignment.review_form = proposal.review_form
review_assignment.save()
relations = review_models.FormElementsRelationship.objects.filter(
form=proposal.review_form
).order_by(
'order'
)
if review_assignment.results:
p = document.add_paragraph(
'%s completed this review assignment form.' %
review_assignment.user.profile.full_name()
)
data = json.loads(review_assignment.results.data)
for relation in relations:
v = data.get(relation.element.name)
document.add_heading(relation.element.name, level=1)
text = strip_html_tags(smart_text(v[0]))
document.add_paragraph(text).bold = True
recommendations = {
'accept': 'Accept',
'reject': 'Reject',
'revisions': 'Revisions Required'
}
document.add_heading("Recommendation", level=1)
document.add_paragraph(
recommendations[review_assignment.recommendation]
).italic = True
document.add_heading("Competing Interests", level=1)
document.add_paragraph(
review_assignment.competing_interests
).italic = True
else:
p = document.add_paragraph(
'You should complete this form and then '
'use the review assignment page to upload it.'
)
for relation in relations:
if (
relation.element.field_type in
['text', 'textarea', 'date', 'email']
):
document.add_heading(
strip_html_tags(relation.element.name) +
": _______________________________",
level=1
)
document.add_paragraph(
strip_html_tags(relation.help_text)
).italic = True
if relation.element.field_type in ['select', 'check']:
document.add_heading(
strip_html_tags(relation.element.name),
level=1
)
if relation.element.field_type == 'select':
choices = render_choices(relation.element.choices)
else:
choices = ['Y', 'N']
p = document.add_paragraph(
strip_html_tags(relation.help_text)
)
p.add_run(
' Mark your choice however you like, '
'as long as it is clear.'
).italic = True
table = document.add_table(rows=2, cols=len(choices))
hdr_cells = table.rows[0].cells
for i, choice in enumerate(choices):
hdr_cells[i].text = choice[0]
table.style = 'TableGrid'
document.add_page_break()
if not os.path.exists(os.path.join(settings.BASE_DIR, 'files', 'forms')):
os.makedirs(os.path.join(settings.BASE_DIR, 'files', 'forms'))
path = os.path.join(
settings.FORM_DIR, '%s.docx' % str(uuid4())
)
with default_storage.open(path, 'wb') as file_stream:
document.save(file_stream)
return path
def create_proposal_form(proposal):
document = Document()
document.add_heading(proposal.title, 0)
document.add_paragraph(
'You should complete this form and then '
'use the proposal page to upload it.'
)
relations = models.ProposalFormElementsRelationship.objects.filter(
form=proposal.form
).order_by(
'order'
)
document.add_heading("Title", level=1)
document.add_paragraph(proposal.title).italic = True
document.add_heading("Subtitle", level=1)
document.add_paragraph(proposal.subtitle).italic = True
document.add_heading("Author", level=1)
document.add_paragraph(proposal.author).italic = True
data = json.loads(proposal.data)
for relation in relations:
v = data.get(relation.element.name)
if v:
document.add_heading(relation.element.name, level=1)
text = BeautifulSoup(smart_text(v[0]), "html.parser").get_text()
document.add_paragraph(text).bold = True
document.add_page_break()
form_file_path = os.path.join(settings.FORM_DIR, f'{uuid4()}.docx')
with default_storage.open(form_file_path, 'wb') as file_stream:
document.save(file_stream)
return form_file_path
@is_reviewer
def serve_proposal_file(request, file_path):
try:
        fsock = default_storage.open(file_path, 'rb')
mimetype = get_file_mimetype(file_path)
response = StreamingHttpResponse(fsock, content_type=mimetype)
response['Content-Disposition'] = (
"attachment; filename=proposal_form.docx"
)
return response
except IOError:
messages.add_message(request, messages.ERROR, 'File not found.')
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def render_choices(choices):
c_split = choices.split('|')
return [(choice.capitalize(), choice) for choice in c_split]
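# Example (illustrative): element choices are stored pipe-separated, so
#     render_choices('yes|no|maybe')
#     # -> [('Yes', 'yes'), ('No', 'no'), ('Maybe', 'maybe')]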
@is_reviewer
def create_proposal_review_form(request, proposal):
document = Document()
document.add_heading(proposal.proposal.title, 0)
p = document.add_paragraph(
'You should complete this form and then '
'use the review page to upload it.'
)
relations = review_models.FormElementsRelationship.objects.filter(
form=proposal.review_form
).order_by(
'order'
)
for relation in relations:
if relation.element.field_type in ['text', 'textarea', 'date', 'email']:
document.add_heading(
strip_html_tags(relation.element.name) +
": _______________________________",
level=1
)
document.add_paragraph(
strip_html_tags(relation.help_text)
).italic = True
if relation.element.field_type in ['select', 'check']:
document.add_heading(
strip_html_tags(relation.element.name),
level=1
)
if relation.element.field_type == 'select':
choices = render_choices(relation.element.choices)
else:
choices = ['Y', 'N']
p = document.add_paragraph(strip_html_tags(relation.help_text))
p.add_run(
' Mark your choice however you like, as long as it is clear.'
).italic = True
table = document.add_table(rows=2, cols=len(choices))
hdr_cells = table.rows[0].cells
for i, choice in enumerate(choices):
hdr_cells[i].text = choice[0]
table.style = 'TableGrid'
document.add_page_break()
path = os.path.join(
settings.FORM_DIR, '%s.docx' % str(uuid4())
)
with default_storage.open(path, 'wb') as file_stream:
document.save(file_stream)
return path
| gpl-2.0 |
912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/conf/locale/ru/formats.py | 82 | 1271 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j E Y г. G:i:s'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y', # '25.10.06'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| gpl-2.0 |
brocade/pysdn | samples/sampleopenflow/demos/demo19.py | 1 | 7388 | #!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pysdn.controller.controller import Controller
from pysdn.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pysdn.common.status import STATUS
from pysdn.common.utils import load_dict_from_file
from pysdn.common.constants import (ETH_TYPE_IPv6,
IP_DSCP_CS5,
IP_PROTO_TCP)
def of_demo_19():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
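    # cfg.yml is assumed to look roughly like this (illustrative values only):
    #     ctrlIpAddr: 172.22.18.70
    #     ctrlPortNum: 8181
    #     ctrlUname: admin
    #     ctrlPswd: admin
    #     nodeName: openflow:1
    #     rundelay: 5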
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 19 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
# --- Flow Match: Ethernet Type
# IPv6 Source Address
# IPv6 Destination Address
# IP DSCP
# TCP Source Port
# TCP Destination Port
eth_type = ETH_TYPE_IPv6
ipv6_src = "4231::3210:3210:3210:3210/80"
ipv6_dst = "1234:1234:1234:1234::5678:5678/64"
ipv6_flabel = 33
ip_dscp = IP_DSCP_CS5 # 'Class Selector' = 'Critical'
ip_proto = IP_PROTO_TCP
tcp_src_port = 11111
tcp_dst_port = 22222
# --- Flow Actions: Output (CONTROLLER)
output_port = "CONTROLLER"
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Ethernet Type (%s)\n"
" IPv6 Source Address (%s)\n"
" IPv6 Destination Address (%s)\n"
" IPv6 Flow Label (%s)\n"
" IP DSCP (%s)\n"
" TCP Source Port (%s)\n"
" TCP Destination Port (%s)" %
(hex(eth_type), ipv6_src, ipv6_dst, ipv6_flabel,
ip_dscp, tcp_src_port, tcp_dst_port))
print (" Action: Output (to %s)" % (output_port))
time.sleep(rundelay)
flow_entry = FlowEntry()
flow_entry.set_flow_name(flow_name="demo19.py")
table_id = 0
flow_id = 25
flow_entry.set_flow_table_id(table_id)
flow_entry.set_flow_id(flow_id)
flow_entry.set_flow_priority(flow_priority=1018)
flow_entry.set_flow_cookie(cookie=23)
flow_entry.set_flow_hard_timeout(hard_timeout=1200)
flow_entry.set_flow_idle_timeout(idle_timeout=3400)
# --- Instruction: 'Apply-actions'
# Actions: 'Output'
instruction = Instruction(instruction_order=0)
action = OutputAction(order=0, port=output_port)
instruction.add_apply_action(action)
flow_entry .add_instruction(instruction)
# --- Match Fields: Ethernet Type
# IPv6 Source Address
# IPv6 Destination Address
# IPv6 Flow Label
# IP protocol number (TCP)
# IP DSCP
# TCP Source Port
# TCP Destination Port
match = Match()
match.set_eth_type(eth_type)
match.set_ipv6_src(ipv6_src)
match.set_ipv6_dst(ipv6_dst)
match.set_ipv6_flabel(ipv6_flabel)
match.set_ip_proto(ip_proto)
match.set_ip_dscp(ip_dscp)
match.set_tcp_src(tcp_src_port)
match.set_tcp_dst(tcp_dst_port)
flow_entry.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Get configured flow from the Controller")
time.sleep(rundelay)
result = ofswitch.get_configured_flow(table_id, flow_id)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully read from the Controller")
print ("Flow info:")
flow = result.get_data()
print json.dumps(flow, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print ("<<< Delete flow with id of '%s' from the Controller's cache "
"and from the table '%s' on the '%s' node" %
(flow_id, table_id, nodeName))
time.sleep(rundelay)
result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
flow_entry.get_flow_id())
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully removed from the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_19()
| bsd-3-clause |
ljchang/neurolearn | docs/conf.py | 1 | 11594 | # -*- coding: utf-8 -*-
#
# nltools documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 4 07:22:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from mock import Mock as MagicMock
sys.path.insert(0, os.path.abspath('sphinxext'))
import sphinx_gallery
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
version = {}
with open("../nltools/version.py") as f:
exec(f.read(), version)
version = version['__version__']
# ReadTheDocs doesn't support necessary C dependencies (e.g., Atlas), so we
# mock them out per https://docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules.
class Mock(MagicMock):
__all__ = []
@classmethod
def __getattr__(cls, name):
return Mock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinxcontrib.napoleon',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Paths for sphinx gallery auto generated examples
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples',
'download_section_examples' : True,
'backreferences_dir': 'backreferences',
'plot_gallery': 'True',
}
# generate autosummary even if no references
autosummary_generate = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nltools'
copyright = u'2018, Cosan Lab'
author = u'Cosan Lab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
extlinks = {'github': 'https://github.com/cosanlab/nltools'}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org
# on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
#
# if not on_rtd: # only import and set the theme if we're building docs locally
# import sphinx_rtd_theme
# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'bootswatch_theme': "sandstone",
'navbar_sidebarrel': True,
'navbar_pagenav': False,
'bootstrap_version': "3",
'globaltoc_includehidden': "true",
'source_link_position': "footer",
'globaltoc_depth': 1,
'navbar_links': [("Installation", "install"),
("API", "api"),
("Tutorials", "auto_examples/index"),
("Github", "http://www.github.com/ljchang/nltools", True)],
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'nltoolsdoc'
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# (master_doc, 'nltools.tex', u'nltools Documentation',
# u'Luke Chang', 'manual'),
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nltools', u'nltools Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nltools', u'nltools Documentation',
author, 'nltools', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
aospx-kitkat/platform_external_chromium_org | chrome/test/chromedriver/test/unittest_util.py | 134 | 4320 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for dealing with the python unittest module."""
import fnmatch
import sys
import unittest
class _TextTestResult(unittest._TextTestResult):
"""A test result class that can print formatted text results to a stream.
Results printed in conformance with gtest output format, like:
[ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
[ OK ] autofill.AutofillTest.testAutofillInvalid
[ RUN ] autofill.AutofillTest.testFillProfile: "test desc."
[ OK ] autofill.AutofillTest.testFillProfile
[ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test."
[ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters
"""
def __init__(self, stream, descriptions, verbosity):
unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
self._fails = set()
def _GetTestURI(self, test):
return '%s.%s.%s' % (test.__class__.__module__,
test.__class__.__name__,
test._testMethodName)
def getDescription(self, test):
return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription())
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.stream.writeln('[ RUN ] %s' % self.getDescription(test))
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def getRetestFilter(self):
return ':'.join(self._fails)
class TextTestRunner(unittest.TextTestRunner):
"""Test Runner for displaying test results in textual format.
Results are displayed in conformance with google test output.
"""
def __init__(self, verbosity=1):
unittest.TextTestRunner.__init__(self, stream=sys.stderr,
verbosity=verbosity)
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def GetTestsFromSuite(suite):
"""Returns all the tests from a given test suite."""
tests = []
for x in suite:
if isinstance(x, unittest.TestSuite):
tests += GetTestsFromSuite(x)
else:
tests += [x]
return tests
def GetTestNamesFromSuite(suite):
"""Returns a list of every test name in the given suite."""
return map(lambda x: GetTestName(x), GetTestsFromSuite(suite))
def GetTestName(test):
"""Gets the test name of the given unittest test."""
return '.'.join([test.__class__.__module__,
test.__class__.__name__,
test._testMethodName])
def FilterTestSuite(suite, gtest_filter):
"""Returns a new filtered tests suite based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
"""
return unittest.TestSuite(FilterTests(GetTestsFromSuite(suite), gtest_filter))
def FilterTests(all_tests, gtest_filter):
"""Returns a filtered list of tests based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
"""
pattern_groups = gtest_filter.split('-')
positive_patterns = pattern_groups[0].split(':')
negative_patterns = None
if len(pattern_groups) > 1:
negative_patterns = pattern_groups[1].split(':')
tests = []
for test in all_tests:
test_name = GetTestName(test)
# Test name must by matched by one positive pattern.
for pattern in positive_patterns:
if fnmatch.fnmatch(test_name, pattern):
break
else:
continue
# Test name must not be matched by any negative patterns.
for pattern in negative_patterns or []:
if fnmatch.fnmatch(test_name, pattern):
break
else:
tests += [test]
return tests
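# Example (illustrative; module and test names are hypothetical): run every test
# in FooTest except testBar, using a gtest-style filter.
#     suite = unittest.defaultTestLoader.loadTestsFromName('foo_test')
#     filtered = FilterTestSuite(suite, 'foo_test.FooTest.*-foo_test.FooTest.testBar')
#     TextTestRunner(verbosity=1).run(filtered)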
| bsd-3-clause |
mhvk/astropy | astropy/io/misc/asdf/connect.py | 12 | 4008 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
# This file connects ASDF to the astropy.table.Table class
import functools
from astropy.io import registry as io_registry
from astropy.table import Table
def read_table(filename, data_key=None, find_table=None, **kwargs):
"""
    Read a `~astropy.table.Table` object from an ASDF file.
    This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
    By default, this function will look for a Table object with the key of
    ``data`` in the top-level ASDF tree. The parameters ``data_key`` and
    ``find_table`` can be used to override the default behavior.
This function is registered as the Table reader for ASDF files with the
unified I/O interface.
Parameters
----------
    filename : str or :class:`py.path:local`
Name of the file to be read
data_key : str
Optional top-level key to use for finding the Table in the tree. If not
provided, uses ``data`` by default. Use of this parameter is not
compatible with ``find_table``.
find_table : function
Optional function to be used for locating the Table in the tree. The
function takes a single parameter, which is a dictionary representing
the top of the ASDF tree. The function must return a
`~astropy.table.Table` instance.
Returns
-------
table : `~astropy.table.Table`
`~astropy.table.Table` instance
"""
try:
import asdf
except ImportError:
raise Exception(
"The asdf module is required to read and write ASDF files")
if data_key and find_table:
raise ValueError("Options 'data_key' and 'find_table' are not compatible")
with asdf.open(filename, **kwargs) as af:
if find_table:
return find_table(af.tree)
else:
return af[data_key or 'data']
def write_table(table, filename, data_key=None, make_tree=None, **kwargs):
"""
Write a `~astropy.table.Table` object to an ASDF file.
This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
By default, this function will write a Table object in the top-level ASDF
tree using the key of ``data``. The parameters ``data_key`` and
``make_tree`` can be used to override the default behavior.
This function is registered as the Table writer for ASDF files with the
unified I/O interface.
Parameters
----------
table : `~astropy.table.Table`
`~astropy.table.Table` instance to be written
filename : str or :class:`py.path:local`
Name of the new ASDF file to be created
data_key : str
Optional top-level key in the ASDF tree to use when writing the Table.
If not provided, uses ``data`` by default. Use of this parameter is not
compatible with ``make_tree``.
make_tree : function
Optional function to be used for creating the ASDF tree. The function
takes a single parameter, which is the `~astropy.table.Table` instance
to be written. The function must return a `dict` representing the ASDF
tree to be created.
"""
try:
import asdf
except ImportError:
raise Exception(
"The asdf module is required to read and write ASDF files")
if data_key and make_tree:
raise ValueError("Options 'data_key' and 'make_tree' are not compatible")
if make_tree:
tree = make_tree(table)
else:
        tree = {data_key or 'data': table}
with asdf.AsdfFile(tree) as af:
af.write_to(filename, **kwargs)
def asdf_identify(origin, filepath, fileobj, *args, **kwargs):
try:
import asdf
except ImportError:
return False
return filepath is not None and filepath.endswith('.asdf')
io_registry.register_reader('asdf', Table, read_table)
io_registry.register_writer('asdf', Table, write_table)
io_registry.register_identifier('asdf', Table, asdf_identify)
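# Illustrative usage through the unified I/O interface registered above (the
# file names are hypothetical; the optional asdf package must be installed):
#     from astropy.table import Table
#     t = Table.read('observations.asdf')          # reads the 'data' key by default
#     t.write('observations_copy.asdf', data_key='obs')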
| bsd-3-clause |
ieguiguren/menu | conf/menuconf.py | 1 | 2865 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import time, os, calendar, datetime, recordatorio
# General
hostname = os.uname()[1]
if hostname == "server1":
prepath = "/opt"
elif hostname == "octopussy":
prepath = "/home/xir/dev"
path = prepath + "/menu/"
tpath = path + "tmp/"
datapath = path + "data/"
db = path + "data/usuarios.db"
descargado = "descargado.tmp"
pdfFile = "calendar.pdf"
jpgFile = "calendar"
filename = "calendar-1.jpg"
#rss = "http://cplasveredas.blogspot.com.es/p/servicio-de-comedor.html"
rss = "http://www.cpblasveredas.com/menus-de-comedor"
recordatorios = recordatorio.rec
# Time
year = time.strftime("%Y")
month = time.strftime("%m")
today = time.strftime("%d")
dw = time.strftime("%w")
tow = datetime.date.today() + datetime.timedelta(days=1)
tomorrow = tow.strftime("%d")
# tomorrow's month, used for the links
tmonth = tow.strftime("%m")
mes = [ '','enero', 'febrero', 'marzo', 'abril', 'mayo', 'junio', 'julio', 'agosto', 'septiembre', 'octubre', 'noviembre','diciembre' ]
# month to download, as a number and as a name
nmes = int(month)
if int(today) > 25:
nmes += 1
mesADescargar = mes[nmes]
firstweekday, month_length = calendar.monthrange(int(year), int(nmes))
nmes = "%02d" % nmes
# email bodies:
predestacados = ''
if recordatorios != '':
predestacados = "Destacados de este mes:"
encabezado = predestacados + recordatorios + """
Hola.
Este servicio envia por correo, de lunes a jueves a las 7am, el menu del colegio para ese mismo dia y para el siguiente. Los viernes solo se envia el del propio viernes y el domingo se envia el del lunes.
"""
c_ayuda = """ Para solicitar el alta en el servicio envia un correo a [email protected] poniendo en el asunto:
alta
Para solicitar la baja en el servicio envia un correo a [email protected] poniendo en el asunto:
baja
"""
c_alta = """Para solicitar la baja en el servicio envia un correo a [email protected] poniendo en el asunto:
baja
"""
c_baja = """Si deseas volver a solicitar el alta en el servicio envia un correo a [email protected] poniendo en el asunto:
alta
"""
pie = """ Para enviar sugerencias de mejora o errores, envia en el asunto la palabra sugerencia o error y en el cuerpo del mensaje la descripcion de la idea. Si es un problema, detalla todo lo posible como se puede reproducir.
Si te resulta util este servicio y crees que deberia mejorar en algun aspecto (dominio propio, mas opciones como poder elegir la hora a la que se envian los mensajes, que el domingo se envie el menu de toda la semana o agradecer el esfuerzo de crearlo), puedes hacer una donacion a traves de Paypal a [email protected].
Muchas gracias por utilizar este servicio.
"""
ayuda = encabezado + c_ayuda + pie
alta = encabezado + c_alta + pie
baja = c_baja + pie
no_available = "Menu no disponible todavia en la web del colegio"
| gpl-3.0 |
olsonse/linuxcnc | src/emc/usr_intf/touchy/mdi.py | 8 | 10469 | # Touchy is Copyright (c) 2009 Chris Radek <[email protected]>
#
# Touchy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Touchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# self.mcodes = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 30, 48, 49, 50, 51,
# 52, 53, 60, 61, 62, 63, 64, 65, 66, 67, 68)
#
# self.gcodes = (0, 10, 20, 30, 40, 50, 51, 52, 53, 70, 80, 100,
# 170, 171, 180, 181, 190, 191, 200, 210, 280, 281,
# 300, 301, 330, 331, 382, 383, 384, 385, 400, 410,
# 411, 420, 421, 430, 431, 490, 530, 540, 550, 560,
# 570, 580, 590, 591, 592, 593, 610, 611, 640, 730,
# 760, 800, 810, 820, 830, 840, 850, 860, 870, 880,
# 890, 900, 901, 910, 911, 920, 921, 922, 923, 930,
# 940, 950, 960, 970, 980, 990)
class mdi:
def __init__(self, emc):
self.clear()
self.emc = emc
self.emcstat = emc.stat()
self.emccommand = emc.command()
self.emcstat.poll()
am = self.emcstat.axis_mask
self.axes = []
self.polar = 0
axisnames = ['X', 'Y', 'Z', 'A', 'B', 'C', 'U', 'V', 'W']
for i in range(9):
if am & (1<<i):
self.axes.append(axisnames[i])
self.gcode = 'M2'
self.codes = {
'M3' : [_('Spindle CW'), 'S'],
'M4' : [_('Spindle CCW'), 'S'],
'M6' : [_('Tool change'), 'T'],
'M61' : [_('Set tool number'), 'Q'],
'M66' : [_('Input control'), 'P', 'E', 'L', 'Q'],
# 'A' means 'the axes'
'G0' : [_('Straight rapid'), 'A'],
'G00' : [_('Straight rapid'), 'A'],
'G1' : [_('Straight feed'), 'A', 'F'],
'G01' : [_('Straight feed'), 'A', 'F'],
'G2' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'P', 'F'],
'G02' : [_('Arc CW'), 'A', 'I', 'J', 'K', 'R', 'P', 'F'],
'G3' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'P', 'F'],
'G03' : [_('Arc CCW'), 'A', 'I', 'J', 'K', 'R', 'P', 'F'],
'G4' : [_('Dwell'), 'P'],
'G04' : [_('Dwell'), 'P'],
'G10' : [_('Setup'), 'L', 'P', 'A', 'Q', 'R'],
'G33' : [_('Spindle synchronized feed'), 'A', 'K'],
'G33.1' : [_('Rigid tap'), 'Z', 'K'],
'G38.2' : [_('Probe'), 'A', 'F'],
'G38.3' : [_('Probe'), 'A', 'F'],
'G38.4' : [_('Probe'), 'A', 'F'],
'G38.5' : [_('Probe'), 'A', 'F'],
'G41' : [_('Radius compensation left'), 'D'],
'G42' : [_('Radius compensation right'), 'D'],
'G41.1' : [_('Radius compensation left, immediate'), 'D', 'L'],
'G42.1' : [_('Radius compensation right, immediate'), 'D', 'L'],
'G43' : [_('Tool length offset'), 'H'],
'G43.1' : [_('Tool length offset immediate'), 'A'],
'G43.2' : [_('Tool length offset additional'), 'H'],
'G53' : [_('Motion in unoffset coordinates'), 'G', 'A', 'F'],
'G64' : [_('Continuous mode'), 'P', 'Q'],
'G76' : [_('Thread'), 'Z', 'P', 'I', 'J', 'K', 'R', 'Q', 'H', 'E', 'L'],
'G81' : [_('Drill'), 'A', 'R', 'L', 'F'],
'G82' : [_('Drill with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G83' : [_('Peck drill'), 'A', 'R', 'L', 'Q', 'F'],
'G73' : [_('Chip-break drill'), 'A', 'R', 'L', 'Q', 'F'],
'G85' : [_('Bore'), 'A', 'R', 'L', 'F'],
'G89' : [_('Bore with dwell'), 'A', 'R', 'L', 'P', 'F'],
'G92' : [_('Offset all coordinate systems'), 'A'],
'G96' : [_('CSS Mode'), 'S', 'D'],
}
self.ocodes = []
def add_macros(self, macros):
for m in macros:
words = m.split()
call = "O<%s> call" % words[0]
args = [''] + [w + ' ' for w in words[1:]]
self.ocodes.append(call)
self.codes[call] = args
def get_description(self, gcode):
return self.codes[gcode][0]
def get_words(self, gcode):
self.gcode = gcode
if gcode[0] == 'M' and gcode.find(".") == -1 and int(gcode[1:]) >= 100 and int(gcode[1:]) <= 199:
return ['P', 'Q']
        if gcode not in self.codes:
return []
# strip description
words = self.codes[gcode][1:]
# replace A with the real axis names
if 'A' in words:
i = words.index('A')
words = words[:i] + self.axes + words[i+1:]
if self.polar and 'X' in self.axes and 'Y' in self.axes:
words[self.axes.index('X')] = '@'
words[self.axes.index('Y')] = '^'
return words
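    # Illustrative note (added; assumes a plain 3-axis XYZ machine reported
    # by the axis mask): get_words('G1') returns ['X', 'Y', 'Z', 'F'],
    # because the 'A' placeholder above expands to the real axis names.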
def clear(self):
self.words = {}
def set_word(self, word, value):
self.words[word] = value
def set_polar(self, p):
        self.polar = p
def issue(self):
m = self.gcode
if m.lower().startswith('o'):
codes = self.codes[m]
for code in self.codes[m][1:]:
v = self.words[code] or "0"
m = m + " [%s]" % v
else:
w = [i for i in self.words if len(self.words.get(i)) > 0]
if '@' in w:
m += '@' + self.words.get('@')
w.remove('@')
if '^' in w:
m += '^' + self.words.get('^')
w.remove('^')
for i in w:
if len(self.words.get(i)) > 0:
m += i + self.words.get(i)
self.emcstat.poll()
if self.emcstat.task_mode != self.emc.MODE_MDI:
self.emccommand.mode(self.emc.MODE_MDI)
self.emccommand.wait_complete()
self.emccommand.mdi(m)
class mdi_control:
def __init__(self, gtk, emc, labels, eventboxes):
self.labels = labels
self.eventboxes = eventboxes
self.numlabels = len(labels)
self.numwords = 1
self.selected = 0
self.gtk = gtk
self.mdi = mdi(emc)
for i in range(self.numlabels):
self.not_editing(i)
self.editing(self.selected)
self.set_text("G")
def not_editing(self, n):
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#ccc"))
def editing(self, n):
self.not_editing(self.selected)
self.selected = n
e = self.eventboxes[n]
e.modify_bg(self.gtk.STATE_NORMAL, self.gtk.gdk.color_parse("#fff"))
def get_text(self):
w = self.labels[self.selected]
return w.get_text()
def set_text(self, t, n = -1):
if n == -1: n = self.selected
w = self.labels[n]
w.set_text(t)
if n > 0:
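            # Split e.g. "X1.5" into the word letter ("X") and value ("1.5").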
head = t.rstrip("0123456789.-")
tail = t[len(head):]
self.mdi.set_word(head, tail)
if len(t) < 2:
w.set_alignment(1.0, 0.5)
else:
w.set_alignment(0.0, 0.5)
def clear(self, b):
t = self.get_text()
self.set_text(t.rstrip("0123456789.-"))
def back(self, b):
t = self.get_text()
if t[-1:] in "0123456789.-":
self.set_text(t[:-1])
def fill_out(self):
if self.selected == 0:
w = self.mdi.get_words(self.get_text())
self.numwords = len(w)
for i in range(1,self.numlabels):
if i <= len(w):
self.set_text(w[i-1], i)
else:
self.set_text("", i)
def next(self, b):
        self.fill_out()
if self.numwords > 0:
self.editing(max(1,(self.selected+1) % (self.numwords+1)))
def ok(self, b):
        self.fill_out()
self.mdi.issue()
def decimal(self, b):
t = self.get_text()
if t.find(".") == -1:
self.set_text(t + ".")
def minus(self, b):
t = self.get_text()
if self.selected > 0:
head = t.rstrip("0123456789.-")
tail = t[len(head):]
if tail.find("-") == -1:
self.set_text(head + "-" + tail)
else:
self.set_text(head + tail[1:])
def keypad(self, b):
t = self.get_text()
num = b.get_name()
self.set_text(t + num)
def gp(self, b):
self.g(b, "G", 1)
def g(self, b, code="G", polar=0):
self.mdi.set_polar(polar)
self.set_text(code, 0)
for i in range(1, self.numlabels):
self.set_text("", i)
self.editing(0)
self.mdi.clear()
def m(self, b):
self.g(b, "M")
def t(self, b):
self.g(b, "T")
def o(self, b):
old_code = self.labels[0].get_text()
ocodes = self.mdi.ocodes
if old_code in ocodes:
j = (ocodes.index(old_code) + 1) % len(ocodes)
else:
j = 0
self.g(b, ocodes[j])
self.next(b)
def select(self, eventbox, event):
n = int(eventbox.get_name()[12:])
if self.selected == 0:
self.fill_out()
if n <= self.numwords:
self.editing(n)
def set_tool(self, tool, g10l11):
self.g(0)
self.set_text("G10", 0)
self.next(0)
if g10l11:
self.set_text("L11", 1)
else:
self.set_text("L10", 1)
self.next(0)
self.set_text("P%d" % tool, 2)
self.next(0) # go to first axis
if ('X' in self.mdi.axes and
'Y' in self.mdi.axes and
'Z' in self.mdi.axes):
# this is fairly mill-like, so go to Z
self.next(0)
self.next(0)
def set_origin(self, system):
self.g(0)
self.set_text("G10", 0)
self.next(0)
self.set_text("L20", 1)
self.next(0)
self.set_text("P%d" % system, 2)
self.next(0)
if ('X' in self.mdi.axes and
'Z' in self.mdi.axes and
not 'Y' in self.mdi.axes):
# this is fairly lathe-like, so go to Z
self.next(0)
| gpl-2.0 |
JonasT/pysnapshotd | src/pysnapshotd/backuplocation.py | 2 | 57502 |
# This file is a part of pysnapshotd, a program for automated backups
# Copyright (C) 2015-2016 Jonas Thiem
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import base64
import datetime
import fnmatch
import functools
import glob
import logging
import math
import os
import queue
import sys
import textwrap
import threading
import time
import traceback
from pysnapshotd.fileio import RemoteLocationFileIO
from pysnapshotd.remotelocation import RemoteLocation
from pysnapshotd.transactioninfo import ConcurrentTransactionException, \
TransactionInfo
class RequiredBackupInfo(object):
def __init__(self, interval_name, snapshot_amount):
self.interval_name = interval_name
self.snapshot_amount = snapshot_amount
class BackupLocation(object):
def __init__(self, config):
self.config = config
if not ("source" in self.config):
raise ValueError("source location missing for backup block")
if not ("target" in self.config):
raise ValueError("target location missing for backup block")
if not ("intervals" in self.config):
raise ValueError("intervals missing for backup block")
self.source = RemoteLocation(self.config["source"])
self.target = RemoteLocation(self.config["target"])
self.intervals = {}
def interval_int(amount):
try:
i = int(amount)
except ValueError:
raise ValueError("intervals have invalid non-numerical "+\
"value for interval snapshot amount")
if i < 1:
raise ValueError("interval snapshot amount needs to be "+\
"set to 1 or higher")
return i
if "intervals" in self.config and len(self.config["intervals"]) > 0:
if "daily" in self.config["intervals"][0]:
self.intervals["daily"] = interval_int(
self.config["intervals"][0]["daily"])
if "weekly" in self.config["intervals"][0]:
self.intervals["weekly"] = interval_int(
self.config["intervals"][0]["weekly"])
if "monthly" in self.config["intervals"][0]:
self.intervals["monthly"] = interval_int(
self.config["intervals"][0]["monthly"])
# create lock file:
try:
pass
except ValueError as e:
logging.exception("unexpected internal ValueError:")
raise RuntimeError(str(e))
def __repr__(self):
return "<BackupLocation source: " + str(self.source) + ", target: " +\
str(self.target) + ">"
def print_thread_info(self):
logging.warning(" --- DUMPING ALL THREADS --- ")
for th in threading.enumerate():
logging.warning(th)
logging.warning(
" ".join(
traceback.format_stack(sys._current_frames()[th.ident])))
logging.warning("")
logging.warning(" --- END OF THREAD DUMP --- ")
def check_resume_from_location(self, location):
bin_sep = os.path.sep.encode("utf-8")
existing_folder = location.replace(b"/", bin_sep)
if not existing_folder.endswith(bin_sep):
existing_folder = existing_folder + bin_sep
if not os.path.exists(existing_folder):
return False
base_folder = os.path.normpath(os.path.join(existing_folder,
b"..", b".."))
if not base_folder.endswith(bin_sep):
base_folder += bin_sep
interval_folder = os.path.basename(
os.path.dirname(os.path.dirname(existing_folder)))
folder_name = os.path.basename(
os.path.dirname(existing_folder))
expected_base_folder = os.path.normpath(
self.target.access_info()["path"].replace(b"/", bin_sep))
if not expected_base_folder.endswith(bin_sep):
expected_base_folder += bin_sep
if len(interval_folder) == 0 or len(folder_name) == 0:
return False
if folder_name.startswith(b".Unfinished ") and \
interval_folder.decode("utf-8", "replace")\
in self.intervals:
if expected_base_folder == base_folder:
return True
return False
def backup(self, interval_info):
# Get transaction info:
try:
info = self.get_transaction_info()
except ConcurrentTransactionException:
logging.warning("Concurrent backup transaction " +\
"suspected, canceling for now")
return False
interval_name = interval_info.interval_name
        # Make sure any previous unfinished transaction is taken care of:
resume_location = None
if info.active:
logging.warning("Previous unfinished transaction found. " +\
"Attempting resume...")
if info.action != "backup" and info.action != "cleanup":
logging.warning("Unexpected transaction type of "+\
"failed transaction, will cleanup anyway")
can_resume = False
if info.action == "backup":
can_resume = self.check_resume_from_location(
base64.b64decode(info.info["folder_target"]))
if not can_resume:
logging.warning("Cannot resume from previously " +\
"failed transaction, since it is not in an " +\
"accepted location: " +\
base64.b64decode(info.info["folder_target"]).\
decode("utf-8"))
if info.action != "backup" or not can_resume:
# first, update timestamp to ensure no concurrent clean-up:
info.action = "cleanup"
try:
info.save()
except ConcurrentTransactionException:
logging.warning("Concurrent backup transaction " +\
"suspected, canceling for now")
return False
new_target_temp = self.target.copy_with_changed_folder(
base64.b64decode(info.info["folder_target"]),
create_implicitly=False)
new_target_final = self.target.copy_with_changed_folder(
base64.b64decode(info.info["final_folder_target"]),
create_implicitly=False)
rio = RemoteLocationFileIO(new_target_temp)
if rio.exists(""):
rio.remove("")
rio2 = RemoteLocationFileIO(new_target_final)
if rio2.exists(""):
rio2.remove("")
info.active = False
try:
info.save()
except ConcurrentTransactionException:
logging.warning("Concurrent backup transaction " +\
"suspected, canceling for now")
return False
logging.info("Clean-up of previous left-overs complete.")
else:
resume_location = base64.b64decode(info.info["folder_target"])
# Get a FileIO for the parent folder where all intervals are stored:
interval_parent_io = RemoteLocationFileIO(
self.target.copy_with_changed_folder(
os.path.normpath(os.path.join(
os.path.join(self.target.access_info()["path"]
))),
create_implicitly=False))
# See if we have other candidates for resuming from:
if resume_location == None:
listing = interval_parent_io.list_dir(b"")
for interval_folder in listing:
                if not interval_folder.decode("utf-8", "replace") \
                        in self.intervals:
                    continue
                if not interval_parent_io.is_dir(interval_folder):
                    continue
                for folder_or_file in interval_parent_io.list_dir(
                        interval_folder):
if folder_or_file.startswith(b".Unfinished Snapshot "):
full_path = \
os.path.join(self.target.access_info()["path"],
interval_folder, folder_or_file)
full_path = os.path.normpath(full_path)
if self.check_resume_from_location(full_path):
resume_location = full_path
break
# See if we have a previously completed backup to draw existing files
# from:
existing_file_sources = []
if resume_location != None:
existing_file_sources.append(resume_location)
all_backups = []
for interval_folder in interval_parent_io.list_dir(b""):
if not interval_parent_io.is_dir(interval_folder):
continue
if not interval_folder.decode('utf-8', 'replace') \
in self.intervals:
continue
# Collect all snapshots we made for this interval in the past:
for snapshot_folder in interval_parent_io.list_dir(
interval_folder):
if not interval_parent_io.is_dir(interval_folder +\
b"/" + snapshot_folder):
continue
if not snapshot_folder.startswith(b"Snapshot "):
continue
# This is a finished old snapshot which might be a suitable
# data source for hard-linking unchanged files:
all_backups.append(
interval_parent_io.remote_location.access_info()["path"]\
+ b"/" +\
interval_folder +\
b"/" + snapshot_folder)
# Sort all old snapshots alphabetically so we can get the most recent:
def key_func(e):
e = e.replace(b"/", os.path.sep.encode("utf-8"))
if e.endswith(b"/"):
e = e[:-1]
return e.rpartition(b"/")[2]
all_backups = sorted(all_backups,
key=key_func)
if len(all_backups) > 0:
# Append newest backup as extra source (for hard-linking etc):
existing_file_sources.append(all_backups[-1])
# Set transaction info:
info.active = True
info.action = "backup"
now_name = datetime.datetime.now()
RemoteLocationFileIO(self.target).create_dir(interval_name)
temp_copy_target = base64.b64encode( # temp folder path we will use
os.path.join(self.target.access_info()["path"],
interval_name.encode("utf-8", "replace"),
datetime.datetime.utcnow().strftime(\
".Unfinished Snapshot %Y-%m-%d %H-%M-%S").encode("utf-8",
"replace"), b"")).decode('utf-8')
if resume_location != None:
# If we found something to resume, use that as a temporary folder
# instead of making a new one:
logging.info("The snapshot will be RESUMED from the previous "+\
"attempt at: " + resume_location.decode("utf-8", "replace"))
temp_copy_target = base64.b64encode(resume_location).\
decode("utf-8")
else:
logging.info("The snapshot will be STARTED FRESH in the " + \
"temporary location: " + base64.b64decode(temp_copy_target).\
decode("utf-8", "replace"))
info.info = {
"action" : "backup",
"interval" : interval_name,
"folder_target" : temp_copy_target,
"final_folder_target" : base64.b64encode(
os.path.join(self.target.access_info()["path"],
interval_name.encode("utf-8", "replace"),
datetime.datetime.utcnow().strftime(\
"Snapshot %Y-%m-%d %H-%M-%S").encode("utf-8",
"replace"), b"")).decode('utf-8'),
}
# Final actual target folder where we will move the completed backup:
new_target = self.target.copy_with_changed_folder(
base64.b64decode(info.info["folder_target"]),
create_implicitly=False)
# Make sure the temporary snapshot folder doesn't exist unless we
# are resuming:
if resume_location == None and \
RemoteLocationFileIO(new_target).exists(""):
raise RuntimeError("target snapshot directory unexpectedly "+\
"exists - aborting")
RemoteLocationFileIO(new_target).create_dir("")
if not RemoteLocationFileIO(new_target).exists(""):
raise RuntimeError("failed to create target snapshot " +\
"directory - aborting")
logging.info("Starting transfer in temporary snapshot directory: " +\
str(new_target))
# Save info before we start:
try:
info.save()
except ConcurrentTransactionException:
logging.warning("Concurrent backup transaction " +\
"suspected, canceling for now")
return False
# Launch thread that updates transaction info:
class TransactionInfoUpdateThread(threading.Thread):
def __init__(self, info):
super().__init__()
self.info = info
self.must_abort = False
self.done = False
def run(self):
try:
prevtime = time.monotonic()
prevtime_real = datetime.datetime.utcnow().timestamp()
while not self.done:
new_monotonic = time.monotonic()
new_real = datetime.datetime.utcnow().timestamp()
if abs(new_monotonic - prevtime) > 60 * 2 or \
abs(new_real - prevtime_real) > 60 * 2:
# This took too long. we're risking concurrent
# access
logging.debug("Time jump, we must abort.")
self.must_abort = True
self.done = True
return
logging.debug("Transaction info update in " +\
"backup target location")
self.info.save()
prevtime = new_monotonic
prevtime_real = new_real
# Sleep for 20 seconds:
i = 1
while i < 20 and not self.done:
time.sleep(1)
i += 1
except Exception as e:
if isinstance(e, ConcurrentTransactionException):
self.must_abort = True
self.done = True
return
logging.exception("Unexpected error in transaction "+\
"info writing thread: " + str(e))
self.done = True
self.must_abort = True
logging.debug("Launching transaction watchdog ...")
self.tinfo_thread = TransactionInfoUpdateThread(info)
# This will hold the job threads later:
jobthreads = []
try:
self.tinfo_thread.start()
# Now we have full control over this transaction with a somewhat
# minimized possibility of concurrent access.
# We can start our actual work:
# Prepare various data structures and helpers:
file_queue = queue.Queue()
directory_queue = queue.Queue()
directory_queue.put(b"")
directory_update_atime_mtime_queue = queue.Queue()
source_io = RemoteLocationFileIO(self.source)
target_io = RemoteLocationFileIO(new_target)
target_parent_io = RemoteLocationFileIO(
self.target.copy_with_changed_folder(
os.path.normpath(os.path.abspath(os.path.join(
base64.b64decode(info.info["folder_target"]), b".."))),
create_implicitly=False))
# First, clean up old unfinished snapshot folders from failed
# transactions where also the rollback failed for some reason.
# Since we're the only active transaction at this point, this
# should be safe to do.
current_tg_folder = os.path.basename(os.path.dirname(\
os.path.join(base64.b64decode(info.info["folder_target"]),
b"")))
assert(current_tg_folder.find(os.path.sep.encode("utf-8")) < 0 \
and len(current_tg_folder) > 0)
listing = target_parent_io.list_dir(b"")
for folder_or_file in listing:
if folder_or_file.startswith(b".Unfinished Snapshot ") and \
folder_or_file != current_tg_folder:
# this folder can be deleted.
target_parent_io.remove(folder_or_file)
if not target_parent_io.exists(current_tg_folder):
raise RuntimeError("new target snapshot folder unexpectedly " \
+ "disappeared after old snapshot clean-up, " +\
"aborting for now...")
# Run the user-specified commands before backup, if any:
if "source-run-cmd" in self.config:
source_io.run_cmd(self.config["source-run-cmd"])
if "target-run-cmd" in self.config:
target_io.run_cmd(self.config["target-run-cmd"])
def human_readable_bytes(byte_amount):
byte_amount = round(byte_amount)
if byte_amount < 2 * 1024:
return str(byte_amount) + "B"
elif byte_amount < 500 * 1024:
return str(round(byte_amount / 1024)) + "KB"
elif byte_amount < 500 * (1024 * 1024):
return ("%.1f" % (byte_amount / (
1024.0 * 1024.0))) +\
"MB"
elif byte_amount < 500 * (1024 * 1024 * 1024):
return ("%.1f" % (byte_amount / (
1024.0 * 1024.0 * 1024.0))) +\
"GB"
else:
return ("%.1f" % (byte_amount / (
1024.0 * 1024.0 * 1024.0 * 1024.0))) +\
"TB"
class FileStats(object):
""" Statistics tracker for files that we have seen passing
through during the backup process, which allows us to
make some basic observations about approximate transfer
speed
"""
def __init__(self):
self.start = time.time()
self.avg_measure_start = time.monotonic()
self.avg_bytes_transferred = 0
self.avg_bytes_processed = 0
self.avg_bytes_created = 0
self.lock = threading.Lock()
self.hard_link_count = 0
self.locally_copied_file_count = 0
self.processed_file_count = 0
self.processed_byte_count = 0
self.created_byte_count = 0
self.skipped_file_count = 0
def add_transferred_file(self, transferred_bytes):
self.lock.acquire()
try:
self.processed_file_count += 1
self.processed_byte_count += transferred_bytes
self.created_byte_count += transferred_bytes
self.avg_bytes_transferred += transferred_bytes
self.avg_bytes_processed += transferred_bytes
self.avg_bytes_created += transferred_bytes
finally:
self.lock.release()
def add_locally_copied_file(self, copied_bytes):
self.lock.acquire()
try:
self.processed_file_count += 1
self.locally_copied_file_count += 1
self.processed_byte_count += copied_bytes
self.created_byte_count += copied_bytes
self.avg_bytes_processed += copied_bytes
self.avg_bytes_created += copied_bytes
finally:
self.lock.release()
def add_hardlinked_file(self, hardlinked_file_size):
self.lock.acquire()
try:
self.processed_file_count += 1
self.hard_link_count += 1
self.processed_byte_count += hardlinked_file_size
self.avg_bytes_processed += hardlinked_file_size
finally:
self.lock.release()
def add_skipped_file(self, skipped_bytes):
self.lock.acquire()
try:
self.processed_file_count += 1
self.skipped_file_count += 1
self.processed_byte_count += skipped_bytes
self.avg_bytes_processed += skipped_bytes
finally:
self.lock.release()
def get_file_count(self):
self.lock.acquire()
try:
result = self.processed_file_count
finally:
self.lock.release()
return result
def get_locally_copied_file_count(self):
self.lock.acquire()
try:
result = self.locally_copied_file_count
finally:
self.lock.release()
return result
def get_hard_linked_file_count(self):
self.lock.acquire()
try:
result = self.hard_link_count
finally:
self.lock.release()
return result
def get_skipped_file_count(self):
self.lock.acquire()
try:
result = self.skipped_file_count
finally:
self.lock.release()
return result
def get_created_byte_count(self):
self.lock.acquire()
try:
result = self.created_byte_count
finally:
self.lock.release()
return result
def get_processed_byte_count(self):
self.lock.acquire()
try:
result = self.processed_byte_count
finally:
self.lock.release()
return result
def reset_average_measure(self):
self.lock.acquire()
try:
self.avg_measure_start = time.monotonic()
self.avg_bytes_transferred = 0
self.avg_bytes_created = 0
self.avg_bytes_processed = 0
finally:
self.lock.release()
def get_transfer_time(self):
return round(time.time() - self.start)
def get_average_transferred_bytes_per_second(self):
self.lock.acquire()
try:
seconds = (time.monotonic() - self.avg_measure_start)
bytes_temp = self.avg_bytes_transferred
finally:
self.lock.release()
return (bytes_temp / seconds)
def get_average_processed_bytes_per_second(self):
self.lock.acquire()
try:
seconds = (time.monotonic() - self.avg_measure_start)
bytes_temp = self.avg_bytes_processed
finally:
self.lock.release()
return (bytes_temp / seconds)
stats = FileStats()
# Base for helper thread class:
class JobThread(threading.Thread):
def __init__(self, queue):
super().__init__()
self.done = False
self.queue = queue
self.error = None
self._busy = False
self.paused = False
@property
def busy(self):
return (self._busy or (not self.queue.empty()))
def process_item(self):
raise RuntimeError(
"missing implementation of abstract method")
def run(self):
try:
while True:
# Wait for new queue item or termination:
while not self.done:
while self.paused:
time.sleep(1)
try:
if not self.queue.empty():
self._busy = True
item = self.queue.get(timeout=1)
break
else:
time.sleep(0.5)
except queue.Empty:
self._busy = False
pass
if self.done:
return
self.process_item(item)
self._busy = False
except Exception as e:
self._busy = False
self.error = e
if not isinstance(e, OSError):
logging.exception("unexpected error in " +\
"job thread")
return
else:
logging.debug("OSError encountered: " + str(e))
# Helper thread which copies files:
class FileCopyThread(JobThread):
def process_item(self, item):
file_path = item
assert(not os.path.isabs(file_path) \
and not file_path.startswith(b"~/") \
and file_path != b"~")
# Try to check if this is a symlink:
is_symlink = False
symlink_target = None
try:
is_symlink = source_io.is_symlink(file_path)
if is_symlink:
symlink_target = source_io.\
get_symlink_target(file_path)
except NotImplementedError:
is_symlink = False
logging.debug("Symlink check skipped, "+\
"not supported by either source or "+\
"target transport protocol. File: "+\
str(file_path))
# Check if this file already exists in any of the
# existing file source locations, saving us the effort of
# a network transfer:
for extra_source in existing_file_sources:
# Construct FileIO pointing to this extra data source:
assert(os.path.isabs(extra_source.replace(b"/",
os.path.sep.encode("utf-8"))))
extra_source_io = \
RemoteLocationFileIO(
target_io.remote_location.\
copy_with_changed_folder(
extra_source,
create_implicitly=False))
is_target_location = False
if (extra_source_io.remote_location
== target_io.remote_location):
is_target_location = True
# See if we already have this and it's not a link:
if not is_symlink and extra_source_io.\
exists(file_path):
print("FOUND: >>>> " + str(file_path))
known_symlink_in_target = False
try:
known_symlink_in_target = extra_source_io.\
is_symlink(file_path)
except NotImplementedError:
pass
if not known_symlink_in_target:
# Try to get file hashes and compare them:
source_hash = None
target_hash = None
try:
source_hash = source_io.sha2sum(file_path)
target_hash = extra_source_io.sha2sum(
file_path)
except NotImplementedError:
pass
# See if the hashes do match:
if source_hash != None and \
source_hash == target_hash:
# Try to get file sizes for statistics:
file_size = 0
try:
file_size = source_io.size(file_path)
except NotImplementedError:
pass
try:
file_size = max(file_size,
extra_source_io.size(file_path))
except NotImplementedError:
pass
if is_target_location:
# This file already exists in our
# resumed location, we can skip it:
logging.debug(
"File skipped, " +\
"it was already found in "+\
"target location " +\
"with matching SHA2 hash: " +\
str(file_path))
stats.add_skipped_file(file_size)
return
else:
# This file exists in an additional
# data source (old/other snapshot)
# -> copy into our current snapshot:
# Get absolute path of copy target:
tg_io_fpath = os.path.join(
target_io.remote_location.\
access_info()\
["path"], os.path.basename(
file_path))
# Try a hard-link first (saves space):
try:
extra_source_io.\
add_hard_link_to_file(
file_path,
tg_io_fpath)
logging.debug(
"File hard-linked "+\
"from extra source at " +\
str(extra_source) + " since "+\
"it is there with matching "+\
"SHA2 hash: " + str(file_path))
stats.add_hardlinked_file(file_size)
return
except NotImplementedError:
# No hard-links? Try regular copy
try:
extra_source_io.\
copy_to_abspath(
file_path, tg_io_fpath)
logging.debug(
"File found and copied "+\
"(hard-linking " +\
"unavailable) "+\
"from extra source at " +\
str(extra_source) +\
" since "+\
"it is there with " +\
"matching "+\
"SHA2 hash: " +\
str(file_path))
stats.add_locally_copied_file(
file_size)
return
except NotImplementedError:
# If no direct copy in the
# remote location is supported
# we will just resume
# regularly.
pass
else:
if is_target_location:
logging.debug(
"Old copy removed and to be " +\
"replaced since " +\
"in target location with " +\
"wrong SHA2 hash: " +\
file_path.decode('utf-8',
'replace'))
target_io.remove(file_path)
else:
logging.debug("File found in " +\
"an extra source at " +\
extra_source.decode('utf-8',
'replace') +\
" but SHA2 "+\
"unobtainable or mismatching: " +\
file_path.decode('utf-8',
'replace'))
else:
print("FILE NOT FOUND ANYWHERE: " + str(file_path))
file_size = 0
if not is_symlink:
# Copy file:
logging.debug("Copying file: " + \
str(file_path))
local_temp_file_path = None
try:
local_temp_file_path = source_io.read_from(
file_path)
file_size = os.path.getsize(
local_temp_file_path)
target_io.write_to(file_path,
local_temp_file_path)
finally:
try:
if local_temp_file_path != None:
os.remove(local_temp_file_path)
except FileNotFoundError:
pass
else:
# Check if the link is already present (when resuming)
if target_io.exists(file_path):
logging.debug(
"Intended symlink location is already an "+\
"existing path (are we resuming?) -> " +\
"removing existing file or " +\
"symlink at: " + str(file_path))
target_io.remove(file_path)
# Set symlink target:
logging.debug("Creating symlink at: " +\
str(file_path))
target_io.create_symlink(file_path,
symlink_target)
stats.add_transferred_file(file_size)
# copy file permissions if supported:
try:
bits = source_io.get_permission_bits(
file_path)
target_io.set_permission_bits(file_path,
bits)
logging.debug("Copied file permissions: " +\
str(file_path))
except NotImplementedError:
logging.debug("File permissions skipped, "+\
"not supported by either source or "+\
"target transport protocol. " +\
"File: " + str(file_path))
pass
# copy file atime/mtime if supported:
try:
(atime, mtime) = source_io.\
get_access_and_modification_time(
file_path)
target_io.set_access_and_modification_time(
file_path, atime, mtime)
logging.debug("Copied file atime/mtime: " +\
str(file_path))
except OSError as e:
if is_symlink and \
target_io.\
affected_by_networking_failures():
logging.debug("Setting file " +\
"permissions on symlink itself "+\
"failed, might be a Linux "+\
"and not a BSD target system where "+\
"this is expected, or an " +\
"unsupported "+\
"Unix variant")
pass
else:
raise e
except NotImplementedError:
logging.debug("File permissions skipped, "+\
"not supported by either source or "+\
"target transport protocol")
pass
# Helper thread which scans directories:
class DirectoryScannerThread(JobThread):
def __init__(self, directory_scan_queue, file_queue,
directory_atime_mtime_queue, config):
super().__init__(directory_scan_queue)
self.file_queue = file_queue
self.directory_atime_mtime_queue = \
directory_atime_mtime_queue
self.config = config
def process_item(self, item):
directory = item
logging.debug("Scanning remote directory: " +\
str(directory))
if directory != "" and directory != b"":
# copy directory permissions if supported:
try:
bits = source_io.get_permission_bits(directory)
target_io.set_permission_bits(directory, bits)
logging.debug("Copied directory permissions: " +\
str(directory))
except NotImplementedError:
logging.debug("Directory permissions skipped, "+\
"not supported by either source or target "+\
"transport protocol")
pass
# schedule setting atime/mtime for this directory:
self.directory_atime_mtime_queue.put(directory)
# scan directory for files:
entries = source_io.list_dir(directory)
for entry in entries:
full_path = os.path.join(directory, entry)
# check exclusion patterns:
if "exclude" in self.config:
excluded = False
for pattern in self.config["exclude"]:
if fnmatch.fnmatch(full_path,
pattern.encode("utf-8")):
logging.debug("Skipping " +\
str(full_path) +\
" since it matches exclusion " +\
"pattern " + str(pattern))
excluded = True
break
if excluded:
continue
# add file/directory for processing:
if source_io.is_dir(full_path):
self.queue.put(full_path)
target_io.create_dir(full_path)
else:
self.file_queue.put(full_path)
def check_if_threads_done():
# Do an initial quick check if any is busy, in which case
# we abort:
for t in jobthreads:
if t.busy:
return False
# Pause all threads for a definite check:
for t in jobthreads:
t.paused = True
# Give threads a short moment to pause:
time.sleep(2)
# Check if any of the threads is still marked as busy:
all_done = True
for t in jobthreads:
if t.busy:
all_done = False
break
# Unpause first, this is required for clean shutdown later
# even if all threads aren't busy anymore:
for t in jobthreads:
t.paused = False
# Report result:
if all_done:
return True
return False
# launch file copy threads:
i = 0
while i < 10:
new_fct = FileCopyThread(file_queue)
new_fct.start()
jobthreads.append(new_fct)
i += 1
# launch directory scanner threads:
i = 0
while i < 2:
new_dst = DirectoryScannerThread(directory_queue,
file_queue, directory_update_atime_mtime_queue,
self.config)
new_dst.start()
jobthreads.append(new_dst)
i += 1
def check_job_errors():
for t in jobthreads:
if t.error != None:
if not isinstance(t.error, OSError):
logging.error("unexpected error occured in " +\
"job thread: " + str(t.error))
raise t.error
# Do monitoring of threads and deal with directory atime/mtime:
last_stat_output = time.time()
while True:
                if self.tinfo_thread.must_abort:
# we were stuck too long, risking concurrent access.
# -> abort here, retry later
logging.warning("Transaction info write timeout, " +\
"aborting backup process ...")
return False
if last_stat_output + (60 * 2) < time.time():
last_stat_output = time.time()
# Put together total transfer time text:
show_seconds = stats.get_transfer_time()
show_minutes = math.floor(show_seconds / 60.0)
show_seconds -= show_minutes * 60
show_hours = math.floor(show_minutes / 60.0)
show_minutes -= show_hours * 60
time_str = "{} second(s)".format(show_seconds)
if show_minutes > 0:
time_str = "{} minute(s) ".format(show_minutes) +\
time_str
if show_hours > 0:
time_str = "{} hour(s) ".format(show_hours) +\
time_str
# Get different transfer rate measurements:
target_io_transfer_rate = 0
try:
target_io_transfer_rate = \
target_io.get_transfer_rate()
except AttributeError:
pass
source_io_transfer_rate = 0
try:
source_io_transfer_rate = \
source_io.get_transfer_rate()
except AttributeError:
pass
added_files_transfer_rate = \
stats.get_average_transferred_bytes_per_second()
# Get processing rate:
added_files_processed_rate = \
stats.get_average_processed_bytes_per_second()
# Try FileIO transfer rates first, they are more accurate:
transfer_rate = max(source_io_transfer_rate,
target_io_transfer_rate)
# Fall back to the less reliable added files copy rate if
# FileIO transfer rates appear to be unavailable:
if transfer_rate <= 0.00001:
transfer_rate = added_files_transfer_rate
# Output stats:
logging.info(("Stats of active backup process as of " +\
datetime.datetime.utcnow().strftime(\
"%Y-%m-%d %H:%M:%S UTC+0") + ": " +\
textwrap.dedent('''\
{} file(s) processed ({} local copies,
                        {} file(s) hard-linked, {} already present),
{} processed in total, with {}
newly created on disk, network transfer rate {}/s,
processing rate {}/s, total transfer time {}
''').format(
stats.get_file_count(),
stats.get_locally_copied_file_count(),
stats.get_hard_linked_file_count(),
stats.get_skipped_file_count(),
human_readable_bytes(stats.get_processed_byte_count()),
human_readable_bytes(stats.get_created_byte_count()),
human_readable_bytes(transfer_rate),
human_readable_bytes(added_files_processed_rate),
time_str)).\
replace("\n", " ").strip())
stats.reset_average_measure()
# check file copy threads for errors:
check_job_errors()
# set directory modification, access times:
while not directory_update_atime_mtime_queue.empty():
check_job_errors()
directory = directory_update_atime_mtime_queue.get()
# copy directory atime, mtime if supported:
try:
(atime, mtime) = source_io.\
get_access_and_modification_time(directory)
target_io.set_access_and_modification_time(directory,
atime, mtime)
logging.debug("Copied directory atime/mtime: " +\
str(directory))
except NotImplementedError:
logging.debug("Directory atime/mtime skipped, not "+\
"supported by either source or target "+\
"transport protocol")
pass
if check_if_threads_done() and \
directory_update_atime_mtime_queue.empty():
# we're done!
break
else:
time.sleep(1)
# Shutdown threads:
for t in jobthreads:
t.done = True
for t in jobthreads:
t.join(60)
while t.is_alive():
logging.warning("Terminating job thread timed out. " +\
"Stuck thread???")
self.print_thread_info()
t.join(60)
# Final rename:
rename_prev = os.path.basename(os.path.dirname(base64.b64decode(\
info.info["folder_target"])))
rename_after = os.path.basename(os.path.dirname(base64.b64decode(\
info.info["final_folder_target"])))
logging.debug("Final rename will be from " + str(rename_prev) + " " +\
"to " + str(rename_after) + "...")
if not target_parent_io.exists(rename_prev):
raise RuntimeError("temporary backup snapshot folder " +\
"unexpectedly not existing, aborting")
target_parent_io.move(rename_prev, rename_after)
logging.info("Snapshot completed at: " +\
base64.b64decode(info.info["final_folder_target"]).\
decode("utf-8"))
except OSError as e:
# stop thread which saves transaction data in intervals:
self.tinfo_thread.done = True
self.tinfo_thread.join()
# save transaction as not active:
info.active = False
info.save()
raise e
except Exception as e:
logging.exception(
"An unexpected error occured during backup. Aborting...")
finally:
# stop the thread which saves transaction data in intervals:
if not self.tinfo_thread.done:
self.tinfo_thread.done = True
try:
self.tinfo_thread.join()
except (KeyboardInterrupt, SystemExit):
pass
# stop job threads:
for t in jobthreads:
t.done = True
for t in jobthreads:
try:
t.join(60)
except Exception as e:
pass
while t.is_alive():
logging.warning("Terminating job thread timed out. " +\
"Stuck thread???")
self.print_thread_info()
try:
t.join(60)
except Exception as e:
pass
        if self.tinfo_thread.must_abort:
# we were stuck too long, risking concurrent access.
# -> abort here, retry later
logging.warning("Transaction info write timeout, " +\
"aborting backup process to " + str(self.target))
return False
# store completed transaction info:
info.active = False
info.global_info["last-backup"][interval_name] =\
int(datetime.datetime.utcnow().timestamp())
info.save()
# delete old snapshots here if interval limit is reached:
try:
snapshots = []
for possible_snapshot in target_parent_io.list_dir(""):
if target_parent_io.is_dir(possible_snapshot) and \
possible_snapshot.startswith(b"Snapshot "):
snapshots.append(possible_snapshot)
snapshots.sort(reverse=True)
if len(snapshots) > interval_info.snapshot_amount:
logging.info("The backup location " + str(self) +\
" has " + str(len(snapshots)) + " snapshots for "+\
"the " + str(interval_name) + " interval, but the "+\
"limit is " + str(interval_info.snapshot_amount) +\
". Excessive old snapshots will be deleted...")
# collect snapshots to be removed:
to_be_deleted = []
i = interval_info.snapshot_amount
while i < len(snapshots):
to_be_deleted.append(snapshots[i])
i += 1
# remove those old snapshots:
for snapshot in to_be_deleted:
logging.info("Deleting old snapshot '" + str(snapshot) +\
"' of " + str(self) + "...")
target_parent_io.remove(snapshot)
except OSError:
logging.warning("Interruption when attempting to clean up old "+\
"backups by OSError - possible temporary file i/o access "+\
"problem. Will re-attempt cleanup with next snapshot. " +\
"Affected target: " + str(self.target) + ", affected "+\
"interval: " + str(interval_name))
return True
def requires_backup(self):
""" Return the name of the interval that needs to be backed up
"""
info = self.get_transaction_info()
if "daily" in self.intervals:
if info.global_info["last-backup"]["daily"] == None:
return RequiredBackupInfo("daily", self.intervals["daily"])
dt = datetime.datetime.utcfromtimestamp(info.global_info\
["last-backup"]["daily"])
dtnow = datetime.datetime.utcnow()
if dtnow.day > dt.day:
return RequiredBackupInfo("daily", self.intervals["daily"])
if "weekly" in self.intervals:
if info.global_info["last-backup"]["weekly"] == None:
return RequiredBackupInfo("weekly", self.intervals["weekly"])
dt = datetime.datetime.utcfromtimestamp(info.global_info\
["last-backup"]["weekly"])
dtnow = datetime.datetime.utcnow()
if dtnow.day >= dt.day + 7:
return RequiredBackupInfo("weekly", self.intervals["weekly"])
if "monthly" in self.intervals:
if info.global_info["last-backup"]["monthly"] == None:
return RequiredBackupInfo("monthly", self.intervals["monthly"])
dt = datetime.datetime.utcfromtimestamp(info.global_info\
["last-backup"]["monthly"])
dtnow = datetime.datetime.utcnow()
if dtnow.month > dt.month:
return RequiredBackupInfo("monthly", self.intervals["monthly"])
return None
def get_transaction_info(self):
return TransactionInfo(RemoteLocationFileIO(self.target))
| gpl-2.0 |
B3AU/waveTree | sklearn/utils/testing.py | 4 | 12125 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# License: BSD 3 clause
import inspect
import pkgutil
import warnings
import scipy as sp
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
from .fixes import savemat
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises", "raises",
"with_setup", "assert_true", "assert_false", "assert_almost_equal",
"assert_array_equal", "assert_array_almost_equal",
"assert_array_less"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
# To remove when we support numpy 1.7
def assert_warns(warning_class, func, *args, **kw):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
        if w[0].category is not warning_class:
            raise AssertionError("First warning for %s is not a "
                                 "%s (is %s)"
% (func.__name__, warning_class, w[0]))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
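# Minimal usage sketch for ignore_warnings (added; the decorated helper is
# hypothetical):
#
# @ignore_warnings
# def _noisy_helper():
#     warnings.warn("deprecated behaviour", DeprecationWarning)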
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exception, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions"""
try:
function(*args, **kwargs)
raise AssertionError("Should have raised %r" % exception(message))
except exception as e:
error_message = str(e)
assert_in(message, error_message)
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
    transposes 'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
savemat(matfile, datasets, oned_as='column')
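def _example_fake_mldata():
    # A minimal usage sketch (added; not part of the original utilities): it
    # writes a fake two-column data set into an in-memory buffer, the same
    # way the mldata mocking below does. Column names are illustrative.
    from io import BytesIO
    columns = {'data': sp.ones((2, 3)), 'label': sp.array([[0, 1]])}
    matfile = BytesIO()
    fake_mldata(columns, 'fake-dataset', matfile, ordering=['label', 'data'])
    matfile.seek(0)
    return matfile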
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
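def _example_mock_mldata_setup():
    # Hedged usage sketch (added): temporarily serve a fabricated data set
    # from memory instead of downloading from mldata, then restore the real
    # urlopen. The data set name and shapes are illustrative assumptions.
    install_mldata_mock({'some-dataset': {'data': sp.ones((10, 4)),
                                          'label': sp.ones((10,))}})
    try:
        pass  # code under test would call sklearn.datasets.fetch_mldata here
    finally:
        uninstall_mldata_mock()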
# Meta estimators need another estimator to be instantiated.
meta_estimators = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
other = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV"]
def all_estimators(include_meta_estimators=False, include_other=False,
type_filter=None):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV.
type_filter : string or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
module = __import__(modname, fromlist="dummy")
if ".tests." in modname:
continue
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_other:
estimators = [c for c in estimators if not c[0] in other]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in meta_estimators]
if type_filter == 'classifier':
estimators = [est for est in estimators
if issubclass(est[1], ClassifierMixin)]
elif type_filter == 'regressor':
estimators = [est for est in estimators
if issubclass(est[1], RegressorMixin)]
elif type_filter == 'transformer':
estimators = [est for est in estimators
if issubclass(est[1], TransformerMixin)]
elif type_filter == 'cluster':
estimators = [est for est in estimators
if issubclass(est[1], ClusterMixin)]
elif type_filter is not None:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# We sort in order to have reproducible test failures
return sorted(estimators)
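def _example_list_classifiers():
    # Illustrative sketch (added): collect the default-constructible
    # classifier classes and pin their random_state so failures reproduce.
    # That every listed class takes a no-argument constructor is an
    # assumption of this sketch.
    found = []
    for name, Estimator in all_estimators(type_filter='classifier'):
        estimator = Estimator()
        set_random_state(estimator, random_state=0)
        found.append((name, estimator))
    return found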
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
matplotlib.pylab.figure()
except:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
| bsd-3-clause |
allenlavoie/tensorflow | tensorflow/python/kernel_tests/rnn_test.py | 2 | 26456 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_lib
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Plus1RNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def call(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class ScalarStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return tensor_shape.TensorShape([])
def zero_state(self, batch_size, dtype):
return array_ops.zeros([], dtype=dtypes.int32)
def call(self, input_, state, scope=None):
return (input_, state + 1)
class TensorArrayStateRNNCell(rnn_cell_impl.RNNCell):
"""RNN Cell its state as a TensorArray."""
@property
def output_size(self):
return 1
@property
def state_size(self):
return (tensor_shape.TensorShape([]), ())
def zero_state(self, batch_size, dtype):
return (array_ops.zeros([], dtype=dtypes.int32),
tensor_array_ops.TensorArray(
dtype=dtype, size=0, dynamic_size=True))
def call(self, input_, state, scope=None):
new_array = state[1].write(state[0], input_)
return (input_, (state[0] + 1, new_array))
class RNNTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_in_graph_and_eager_modes()
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
if context.executing_eagerly():
inputs = [constant_op.constant(np.ones((3, 4)))]
else:
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
rnn.dynamic_rnn(
cell,
array_ops.stack(inputs),
dtype=dtypes.float32,
sequence_length=[[4]])
@test_util.run_in_graph_and_eager_modes()
def testBatchSizeFromInput(self):
cell = Plus1RNNCell()
in_eager_mode = context.executing_eagerly()
# With static batch size
if in_eager_mode:
inputs = np.zeros((3, 4, 5), dtype=np.float32)
initial_state = np.zeros((3, 5), dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
initial_state = array_ops.placeholder(dtypes.float32, shape=(3, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell, inputs, initial_state=initial_state)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# Without static batch size
# Tensor shapes are fully determined with eager execution enabled,
# so only run this test for graph construction.
if not in_eager_mode:
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(None, outputs.shape[0].value)
self.assertEqual(None, state.shape[0].value)
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell,
inputs,
initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
self.assertEqual(None, outputs.shape[0].value)
self.assertEqual(None, state.shape[0].value)
@test_util.run_in_graph_and_eager_modes()
def testScalarStateIsAccepted(self):
cell = ScalarStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.test_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state)
@test_util.run_in_graph_and_eager_modes()
def testTensorArrayStateIsAccepted(self):
cell = TensorArrayStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.test_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
state = (state[0], state[1].stack())
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={
inputs: [[[1], [2], [3], [4]]]
})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state[0])
self.assertAllEqual([[[1]], [[2]], [[3]], [[4]]], state[1])
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
_static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)
def _create_dynamic_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
_static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
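# Illustrative sketch (not part of the original benchmark suite): the graph
# construction benchmark above is driven only by the unrolled sequence length;
# all other sizes are fixed inside the function.
#
#     for max_time in (1, 25, 50):
#         delta_static, delta_dynamic = (
#             graph_creation_static_vs_dynamic_rnn_benchmark(max_time))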
def _timer(sess, ops):
# Warm up before timing
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using static_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Halve the sequence length, full static unroll
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, final_state = contrib_rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + final_state,
trainable_variables)
return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Run with concatenated states (default)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = contrib_rnn.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell,
inputs_t,
sequence_length=sequence_length,
swap_memory=swap_memory,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
swap_memory, nn):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(nn):
if dynamic:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
dynamic, elapsed,
elapsed / seqlen))
class BenchmarkRNN(test.Benchmark):
def benchmarkGraphCreationStaticVsDynamicLSTM(self):
print("Graph Creation: Static Unroll vs. Dynamic Unroll LSTM")
print("max_t \t dt(static) \t dt(dynamic) \t dt(dynamic)/dt(static)")
for max_time in (1, 25, 50):
s_dt, d_dt = graph_creation_static_vs_dynamic_rnn_benchmark(max_time)
self.report_benchmark(
name="graph_creation_time_static_T%02d" % max_time,
iters=5,
wall_time=s_dt)
self.report_benchmark(
name="graph_creation_time_dynamic_T%02d" % max_time,
iters=5,
wall_time=d_dt)
def benchmarkStaticUnrollVsDynamicFlowLSTM(self):
print("Calculation: Static Unroll with Dynamic Flow LSTM "
"vs. Dynamic Unroll LSTM")
print("batch \t max_t \t units \t gpu \t dt(static) \t dt(dynamic) "
"\t dt(dynamic)/dt(static)")
for batch_size in (256,):
for max_time in (50,):
for num_units in (512, 256, 128):
for use_gpu in (False, True):
s_dt, d_dt = static_vs_dynamic_rnn_benchmark(batch_size, max_time,
num_units, use_gpu)
self.report_benchmark(
name="static_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="dynamic_unroll_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkDynamicLSTMNoMemorySwapVsMemorySwap(self):
print("Calculation: Dynamic LSTM No Memory Swap vs. Memory Swap")
print("batch \t max_t \t units \t no_swap \t swap \t swap/no_swap")
for batch_size in (256, 512):
for max_time in (100,):
for num_units in (512, 256, 128):
no_swap, swap = dynamic_rnn_swap_memory_benchmark(batch_size,
max_time, num_units)
self.report_benchmark(
name="dynamic_lstm_no_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=no_swap)
self.report_benchmark(
name="dynamic_lstm_with_memory_swap_T%02d_B%03d_N%03d" %
(max_time, batch_size, num_units),
iters=20,
wall_time=swap)
def benchmarkStaticUnrollHalfSequenceLengthVsHalfUnroll(self):
print("Calculation: Static Unroll with Halved Sequence Length "
"vs. Half Static Unroll")
print("batch \t full_t \t units \t gpu \t dt(half_seq_len) "
"\t dt(unroll_half) \t dt(half_seq_len)/dt(unroll_half)")
for batch_size in (128,):
for max_time in (50,):
for num_units in (256,):
for use_gpu in (False, True):
s_dt, d_dt = half_seq_len_vs_unroll_half_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="half_seq_len_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=s_dt)
self.report_benchmark(
name="unroll_half_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=d_dt)
def benchmarkStaticUnrollStateConcatVsStateTuple(self):
print("Calculation: Static Unroll with Concatenated State "
"vs. Tuple State")
print("batch \t time \t units \t gpu \t dt(concat_state) "
"\t dt(tuple_state) \t dt(concat_state)/dt(tuple_state)")
for batch_size in (
16,
128,):
for max_time in (50,):
for num_units in (
16,
128,):
for use_gpu in (False, True):
c_dt, t_dt = concat_state_vs_tuple_state_rnn_benchmark(batch_size,
max_time,
num_units,
use_gpu)
self.report_benchmark(
name="concat_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=c_dt)
self.report_benchmark(
name="tuple_state_time_T%02d_B%03d_N%03d_gpu_%s" %
(max_time, batch_size, num_units, use_gpu),
iters=20,
wall_time=t_dt)
def _benchmarkDynamicLSTMMemorySwapLongSeq(self):
"""The memory swapping test for the SOSP submission."""
print("Calculation: Long LSTM Sequence")
print("batch \t len \t units \t dynamic \t elapsed_t \t elapsed_t/len")
batch_size = 512
seqlen = 800
num_units = 512
dynamic = True
swap_memory = True
# Some warming up.
if swap_memory:
rnn_long_sequence_benchmark(batch_size, seqlen, num_units,
dynamic, swap_memory, 2)
# Measure the performance.
for slen in xrange(100, 1100, 100):
rnn_long_sequence_benchmark(batch_size, slen, num_units, dynamic,
swap_memory, 3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
denisenkom/django | django/forms/models.py | 2 | 51990 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from __future__ import unicode_literals
from collections import OrderedDict
import warnings
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS, FieldError
from django.forms.fields import Field, ChoiceField
from django.forms.forms import BaseForm, get_declared_fields
from django.forms.formsets import BaseFormSet, formset_factory
from django.forms.util import ErrorList
from django.forms.widgets import (SelectMultiple, HiddenInput,
MultipleHiddenInput, media_property, CheckboxSelectMultiple)
from django.utils.encoding import smart_text, force_text
from django.utils import six
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext, string_concat
__all__ = (
'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
'ALL_FIELDS',
)
ALL_FIELDS = '__all__'
def construct_instance(form, instance, fields=None, exclude=None):
"""
Constructs and returns a model instance from the bound ``form``'s
``cleaned_data``, but does not save the returned instance to the
database.
"""
from django.db import models
opts = instance._meta
cleaned_data = form.cleaned_data
file_field_list = []
for f in opts.fields:
if not f.editable or isinstance(f, models.AutoField) \
or not f.name in cleaned_data:
continue
if fields is not None and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
# Defer saving file-type fields until after the other fields, so a
# callable upload_to can use the values from other fields.
if isinstance(f, models.FileField):
file_field_list.append(f)
else:
f.save_form_data(instance, cleaned_data[f.name])
for f in file_field_list:
f.save_form_data(instance, cleaned_data[f.name])
return instance
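# Illustrative sketch of how construct_instance() is typically used; the
# ``AuthorForm``/``Author`` names are hypothetical and not part of this
# module. The helper copies a bound form's cleaned_data onto an unsaved
# model instance.
#
#     form = AuthorForm(data={'name': 'Anne'})
#     assert form.is_valid()
#     author = construct_instance(form, Author())   # not yet saved
#     author.save()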
def save_instance(form, instance, fields=None, fail_message='saved',
commit=True, exclude=None, construct=True):
"""
Saves bound Form ``form``'s cleaned_data into model instance ``instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
If construct=False, assume ``instance`` has already been constructed and
just needs to be saved.
"""
if construct:
instance = construct_instance(form, instance, fields, exclude)
opts = instance._meta
if form.errors:
raise ValueError("The %s could not be %s because the data didn't"
" validate." % (opts.object_name, fail_message))
# Wrap up the saving of m2m data as a function.
def save_m2m():
cleaned_data = form.cleaned_data
for f in opts.many_to_many:
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
if f.name in cleaned_data:
f.save_form_data(instance, cleaned_data[f.name])
if commit:
# If we are committing, save the instance and the m2m data immediately.
instance.save()
save_m2m()
else:
# We're not committing. Add a method to the form to allow deferred
# saving of m2m data.
form.save_m2m = save_m2m
return instance
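# Illustrative sketch (hypothetical ``form`` and ``Article`` objects): with
# commit=False the instance is returned unsaved and a save_m2m() callback is
# attached to the form for deferred many-to-many handling.
#
#     article = save_instance(form, Article(), commit=False)
#     article.author = request.user        # tweak before saving
#     article.save()
#     form.save_m2m()                       # attached because commit=False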
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
"""
Returns a dict containing the data in ``instance`` suitable for passing as
a Form's ``initial`` keyword argument.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned dict.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned dict, even if they are listed in
the ``fields`` argument.
"""
# avoid a circular import
from django.db.models.fields.related import ManyToManyField
opts = instance._meta
data = {}
for f in opts.concrete_fields + opts.many_to_many:
if not f.editable:
continue
if fields and not f.name in fields:
continue
if exclude and f.name in exclude:
continue
if isinstance(f, ManyToManyField):
# If the object doesn't have a primary key yet, just use an empty
# list for its m2m fields. Calling f.value_from_object will raise
# an exception.
if instance.pk is None:
data[f.name] = []
else:
# MultipleChoiceWidget needs a list of pks, not object instances.
data[f.name] = list(f.value_from_object(instance).values_list('pk', flat=True))
else:
data[f.name] = f.value_from_object(instance)
return data
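# Illustrative sketch (``Author``/``AuthorForm`` are hypothetical):
# model_to_dict() is handy for seeding a form's ``initial`` data from an
# existing instance.
#
#     author = Author.objects.get(pk=1)
#     initial = model_to_dict(author, fields=['name', 'birth_date'])
#     form = AuthorForm(initial=initial)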
def fields_for_model(model, fields=None, exclude=None, widgets=None,
formfield_callback=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns an ``OrderedDict`` containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
"""
field_list = []
ignored = []
opts = model._meta
for f in sorted(opts.concrete_fields + opts.many_to_many):
if not f.editable:
continue
if fields is not None and not f.name in fields:
continue
if exclude and f.name in exclude:
continue
kwargs = {}
if widgets and f.name in widgets:
kwargs['widget'] = widgets[f.name]
if localized_fields == ALL_FIELDS or (localized_fields and f.name in localized_fields):
kwargs['localize'] = True
if labels and f.name in labels:
kwargs['label'] = labels[f.name]
if help_texts and f.name in help_texts:
kwargs['help_text'] = help_texts[f.name]
if error_messages and f.name in error_messages:
kwargs['error_messages'] = error_messages[f.name]
if formfield_callback is None:
formfield = f.formfield(**kwargs)
elif not callable(formfield_callback):
raise TypeError('formfield_callback must be a function or callable')
else:
formfield = formfield_callback(f, **kwargs)
if formfield:
field_list.append((f.name, formfield))
else:
ignored.append(f.name)
field_dict = OrderedDict(field_list)
if fields:
field_dict = OrderedDict(
[(f, field_dict.get(f)) for f in fields
if ((not exclude) or (exclude and f not in exclude)) and (f not in ignored)]
)
return field_dict
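# Illustrative sketch (``Author`` is a hypothetical model): fields_for_model()
# yields an OrderedDict mapping model field names to form fields, which can be
# inspected or merged into a hand-written form.
#
#     author_fields = fields_for_model(Author, fields=('name', 'birth_date'))
#     list(author_fields)                  # ['name', 'birth_date']
#     name_field = author_fields['name']   # the generated form field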
class ModelFormOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.widgets = getattr(options, 'widgets', None)
self.localized_fields = getattr(options, 'localized_fields', None)
self.labels = getattr(options, 'labels', None)
self.help_texts = getattr(options, 'help_texts', None)
self.error_messages = getattr(options, 'error_messages', None)
class ModelFormMetaclass(type):
def __new__(cls, name, bases, attrs):
formfield_callback = attrs.pop('formfield_callback', None)
try:
parents = [b for b in bases if issubclass(b, ModelForm)]
except NameError:
# We are defining ModelForm itself.
parents = None
declared_fields = get_declared_fields(bases, attrs, False)
new_class = super(ModelFormMetaclass, cls).__new__(cls, name, bases,
attrs)
if not parents:
return new_class
if 'media' not in attrs:
new_class.media = media_property(new_class)
opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
# We check if a string was passed to `fields` or `exclude`,
# which is likely to be a mistake where the user typed ('foo') instead
# of ('foo',)
for opt in ['fields', 'exclude', 'localized_fields']:
value = getattr(opts, opt)
if isinstance(value, six.string_types) and value != ALL_FIELDS:
msg = ("%(model)s.Meta.%(opt)s cannot be a string. "
"Did you mean to type: ('%(value)s',)?" % {
'model': new_class.__name__,
'opt': opt,
'value': value,
})
raise TypeError(msg)
if opts.model:
# If a model is defined, extract form fields from it.
if opts.fields is None and opts.exclude is None:
# This should be some kind of assertion error once the deprecation
# cycle is complete.
warnings.warn("Creating a ModelForm without either the 'fields' attribute "
"or the 'exclude' attribute is deprecated - form %s "
"needs updating" % name,
DeprecationWarning, stacklevel=2)
if opts.fields == ALL_FIELDS:
# sentinel for fields_for_model to indicate "get the list of
# fields from the model"
opts.fields = None
fields = fields_for_model(opts.model, opts.fields, opts.exclude,
opts.widgets, formfield_callback,
opts.localized_fields, opts.labels,
opts.help_texts, opts.error_messages)
# make sure opts.fields doesn't specify an invalid field
none_model_fields = [k for k, v in six.iteritems(fields) if not v]
missing_fields = set(none_model_fields) - \
set(declared_fields.keys())
if missing_fields:
message = 'Unknown field(s) (%s) specified for %s'
message = message % (', '.join(missing_fields),
opts.model.__name__)
raise FieldError(message)
# Override default model fields with any custom declared ones
# (plus, include all the other declared fields).
fields.update(declared_fields)
else:
fields = declared_fields
new_class.declared_fields = declared_fields
new_class.base_fields = fields
return new_class
class BaseModelForm(BaseForm):
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=None,
empty_permitted=False, instance=None):
opts = self._meta
if opts.model is None:
raise ValueError('ModelForm has no model class specified.')
if instance is None:
# if we didn't get an instance, instantiate a new one
self.instance = opts.model()
object_data = {}
else:
self.instance = instance
object_data = model_to_dict(instance, opts.fields, opts.exclude)
# if initial was provided, it should override the values from instance
if initial is not None:
object_data.update(initial)
# self._validate_unique will be set to True by BaseModelForm.clean().
# It is False by default so overriding self.clean() and failing to call
# super will stop validate_unique from being called.
self._validate_unique = False
super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
error_class, label_suffix, empty_permitted)
def _update_errors(self, errors):
for field, messages in errors.error_dict.items():
if field not in self.fields:
continue
field = self.fields[field]
for message in messages:
if isinstance(message, ValidationError):
if message.code in field.error_messages:
message.message = field.error_messages[message.code]
message_dict = errors.message_dict
for k, v in message_dict.items():
if k != NON_FIELD_ERRORS:
self._errors.setdefault(k, self.error_class()).extend(v)
# Remove the data from the cleaned_data dict since it was invalid
if k in self.cleaned_data:
del self.cleaned_data[k]
if NON_FIELD_ERRORS in message_dict:
messages = message_dict[NON_FIELD_ERRORS]
self._errors.setdefault(NON_FIELD_ERRORS, self.error_class()).extend(messages)
def _get_validation_exclusions(self):
"""
For backwards-compatibility, several types of fields need to be
excluded from model validation. See the following tickets for
details: #12507, #12521, #12553
"""
exclude = []
# Build up a list of fields that should be excluded from model field
# validation and unique checks.
for f in self.instance._meta.fields:
field = f.name
# Exclude fields that aren't on the form. The developer may be
# adding these values to the model after form validation.
if field not in self.fields:
exclude.append(f.name)
# Don't perform model validation on fields that were defined
# manually on the form and excluded via the ModelForm's Meta
# class. See #12901.
elif self._meta.fields and field not in self._meta.fields:
exclude.append(f.name)
elif self._meta.exclude and field in self._meta.exclude:
exclude.append(f.name)
# Exclude fields that failed form validation. There's no need for
# the model fields to validate them as well.
elif field in self._errors.keys():
exclude.append(f.name)
# Exclude empty fields that are not required by the form, if the
# underlying model field is required. This keeps the model field
# from raising a required error. Note: don't exclude the field from
# validation if the model field allows blanks. If it does, the blank
# value may be included in a unique check, so cannot be excluded
# from validation.
else:
form_field = self.fields[field]
field_value = self.cleaned_data.get(field, None)
if not f.blank and not form_field.required and field_value in form_field.empty_values:
exclude.append(f.name)
return exclude
def clean(self):
self._validate_unique = True
return self.cleaned_data
def _post_clean(self):
opts = self._meta
# Update the model instance with self.cleaned_data.
self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
exclude = self._get_validation_exclusions()
# Foreign Keys being used to represent inline relationships
# are excluded from basic field value validation. This is for two
# reasons: firstly, the value may not be supplied (#12507; the
# case of providing new values to the admin); secondly the
# object being referred to may not yet fully exist (#12749).
# However, these fields *must* be included in uniqueness checks,
# so this can't be part of _get_validation_exclusions().
for f_name, field in self.fields.items():
if isinstance(field, InlineForeignKeyField):
exclude.append(f_name)
try:
self.instance.full_clean(exclude=exclude,
validate_unique=False)
except ValidationError as e:
self._update_errors(e)
# Validate uniqueness if needed.
if self._validate_unique:
self.validate_unique()
def validate_unique(self):
"""
Calls the instance's validate_unique() method and updates the form's
validation errors if any were raised.
"""
exclude = self._get_validation_exclusions()
try:
self.instance.validate_unique(exclude=exclude)
except ValidationError as e:
self._update_errors(e)
def save(self, commit=True):
"""
Saves this ``form``'s cleaned_data into model instance
``self.instance``.
If commit=True, then the changes to ``instance`` will be saved to the
database. Returns ``instance``.
"""
if self.instance.pk is None:
fail_message = 'created'
else:
fail_message = 'changed'
return save_instance(self, self.instance, self._meta.fields,
fail_message, commit, self._meta.exclude,
construct=False)
save.alters_data = True
class ModelForm(six.with_metaclass(ModelFormMetaclass, BaseModelForm)):
pass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
formfield_callback=None, widgets=None, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a ModelForm containing form fields for the given model.
``fields`` is an optional list of field names. If provided, only the named
fields will be included in the returned fields. If omitted or '__all__',
all fields will be used.
``exclude`` is an optional list of field names. If provided, the named
fields will be excluded from the returned fields, even if they are listed
in the ``fields`` argument.
``widgets`` is a dictionary of model field names mapped to a widget.
``localized_fields`` is a list of names of fields which should be localized.
``formfield_callback`` is a callable that takes a model field and returns
a form field.
``labels`` is a dictionary of model field names mapped to a label.
``help_texts`` is a dictionary of model field names mapped to a help text.
``error_messages`` is a dictionary of model field names mapped to a
dictionary of error messages.
"""
# Create the inner Meta class. FIXME: ideally, we should be able to
# construct a ModelForm without creating and passing in a temporary
# inner class.
# Build up a list of attributes that the Meta object will have.
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if widgets is not None:
attrs['widgets'] = widgets
if localized_fields is not None:
attrs['localized_fields'] = localized_fields
if labels is not None:
attrs['labels'] = labels
if help_texts is not None:
attrs['help_texts'] = help_texts
if error_messages is not None:
attrs['error_messages'] = error_messages
# If the parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(form, 'Meta'):
parent = (form.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new form class a reasonable name.
class_name = model.__name__ + str('Form')
# Class attributes for the new form class.
form_class_attrs = {
'Meta': Meta,
'formfield_callback': formfield_callback
}
# The ModelFormMetaclass will trigger a similar warning/error, but this will
# be difficult to debug for code that needs updating, so we produce the
# warning here too.
if (getattr(Meta, 'fields', None) is None and
getattr(Meta, 'exclude', None) is None):
warnings.warn("Calling modelform_factory without defining 'fields' or "
"'exclude' explicitly is deprecated",
DeprecationWarning, stacklevel=2)
# Instantiate type(form) in order to use the same metaclass as form.
return type(form)(class_name, (form,), form_class_attrs)
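# Illustrative sketch (``Author`` is a hypothetical model): the factory builds
# a ModelForm subclass on the fly, equivalent to declaring a ModelForm with an
# inner Meta listing the same options.
#
#     AuthorForm = modelform_factory(Author, fields=('name', 'birth_date'))
#     form = AuthorForm(data=request.POST)
#     if form.is_valid():
#         form.save()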
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
"""
A ``FormSet`` for editing a queryset and/or adding new objects to it.
"""
model = None
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
queryset=None, **kwargs):
self.queryset = queryset
self.initial_extra = kwargs.pop('initial', None)
defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
defaults.update(kwargs)
super(BaseModelFormSet, self).__init__(**defaults)
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if not (self.data or self.files):
return len(self.get_queryset())
return super(BaseModelFormSet, self).initial_form_count()
def _existing_object(self, pk):
if not hasattr(self, '_object_dict'):
self._object_dict = dict((o.pk, o) for o in self.get_queryset())
return self._object_dict.get(pk)
def _construct_form(self, i, **kwargs):
if self.is_bound and i < self.initial_form_count():
# Import goes here instead of module-level because importing
# django.db has side effects.
from django.db import connections
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
pk_field = self.model._meta.pk
pk = pk_field.get_db_prep_lookup('exact', pk,
connection=connections[self.get_queryset().db])
if isinstance(pk, list):
pk = pk[0]
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and not kwargs.get('instance'):
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i-self.initial_form_count()]
except IndexError:
pass
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
if self.queryset is not None:
qs = self.queryset
else:
qs = self.model._default_manager.get_queryset()
# If the queryset isn't already ordered we need to add an
# artificial ordering here to make sure that all formsets
# constructed from this queryset have the same form order.
if not qs.ordered:
qs = qs.order_by(self.model._meta.pk.name)
# Removed queryset limiting here. As per discussion re: #13023
# on django-dev, max_num should not prevent existing
# related objects/inlines from being displayed.
self._queryset = qs
return self._queryset
def save_new(self, form, commit=True):
"""Saves and returns a new model instance for the given form."""
return form.save(commit=commit)
def save_existing(self, form, instance, commit=True):
"""Saves and returns an existing model instance for the given form."""
return form.save(commit=commit)
def save(self, commit=True):
"""Saves model instances for every form, adding and changing instances
as necessary, and returns the list of instances.
"""
if not commit:
self.saved_forms = []
def save_m2m():
for form in self.saved_forms:
form.save_m2m()
self.save_m2m = save_m2m
return self.save_existing_objects(commit) + self.save_new_objects(commit)
save.alters_data = True
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
exclude = form._get_validation_exclusions()
unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# get data for each field of each of unique_check
row_data = (form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and not None in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None
and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise ValidationError(errors)
def get_unique_error_message(self, unique_check):
if len(unique_check) == 1:
return ugettext("Please correct the duplicate data for %(field)s.") % {
"field": unique_check[0],
}
else:
return ugettext("Please correct the duplicate data for %(field)s, "
"which must be unique.") % {
"field": get_text_list(unique_check, six.text_type(_("and"))),
}
def get_date_error_message(self, date_check):
return ugettext("Please correct the duplicate data for %(field_name)s "
"which must be unique for the %(lookup)s in %(date_field)s.") % {
'field_name': date_check[2],
'date_field': date_check[3],
'lookup': six.text_type(date_check[1]),
}
def get_form_error(self):
return ugettext("Please correct the duplicate values below.")
def save_existing_objects(self, commit=True):
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
pk_name = self._pk_field.name
raw_pk_value = form._raw_value(pk_name)
# clean() for different types of PK fields can sometimes return
# the model instance, and sometimes the PK. Handle either.
pk_value = form.fields[pk_name].clean(raw_pk_value)
pk_value = getattr(pk_value, 'pk', pk_value)
obj = self._existing_object(pk_value)
if form in forms_to_delete:
self.deleted_objects.append(obj)
if commit:
obj.delete()
continue
if form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def save_new_objects(self, commit=True):
self.new_objects = []
for form in self.extra_forms:
if not form.has_changed():
continue
# If someone has marked an add form for deletion, don't save the
# object.
if self.can_delete and self._should_delete_form(form):
continue
self.new_objects.append(self.save_new(form, commit=commit))
if not commit:
self.saved_forms.append(form)
return self.new_objects
def add_fields(self, form, index):
"""Add a hidden field for the object's primary key."""
from django.db.models import AutoField, OneToOneField, ForeignKey
self._pk_field = pk = self.model._meta.pk
# If a pk isn't editable, then it won't be on the form, so we need to
# add it here so we can tell which object is which when we get the
# data back. Generally, pk.editable should be False, but for some
# reason, auto_created pk fields and AutoFields have editable set to
# True, so check for that as well.
def pk_is_not_editable(pk):
return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
if pk_is_not_editable(pk) or pk.name not in form.fields:
if form.is_bound:
pk_value = form.instance.pk
else:
try:
if index is not None:
pk_value = self.get_queryset()[index].pk
else:
pk_value = None
except IndexError:
pk_value = None
if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
qs = pk.rel.to._default_manager.get_queryset()
else:
qs = self.model._default_manager.get_queryset()
qs = qs.using(form.instance._state.db)
if form._meta.widgets:
widget = form._meta.widgets.get(self._pk_field.name, HiddenInput)
else:
widget = HiddenInput
form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=widget)
super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
formset=BaseModelFormSet, extra=1, can_delete=False,
can_order=False, max_num=None, fields=None, exclude=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns a FormSet class for the given Django model class.
"""
# modelform_factory will produce the same warning/error, but that will be
# difficult to debug for code that needs upgrading, so we produce the
# warning here too. This logic is reproducing logic inside
# modelform_factory, but it can be removed once the deprecation cycle is
# complete, since the validation exception will produce a helpful
# stacktrace.
meta = getattr(form, 'Meta', None)
if meta is None:
meta = type(str('Meta'), (object,), {})
if (getattr(meta, 'fields', fields) is None and
getattr(meta, 'exclude', exclude) is None):
warnings.warn("Calling modelformset_factory without defining 'fields' or "
"'exclude' explicitly is deprecated",
DeprecationWarning, stacklevel=2)
form = modelform_factory(model, form=form, fields=fields, exclude=exclude,
formfield_callback=formfield_callback,
widgets=widgets, localized_fields=localized_fields,
labels=labels, help_texts=help_texts, error_messages=error_messages)
FormSet = formset_factory(form, formset, extra=extra, max_num=max_num,
can_order=can_order, can_delete=can_delete,
validate_max=validate_max)
FormSet.model = model
return FormSet
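# Illustrative sketch (hypothetical ``Author`` model): the returned FormSet
# class edits a queryset and can add new objects via its extra forms.
#
#     AuthorFormSet = modelformset_factory(Author, fields=('name',), extra=2)
#     formset = AuthorFormSet(queryset=Author.objects.all())
#     if formset.is_valid():
#         formset.save()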
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
"""A formset for child objects related to a parent."""
def __init__(self, data=None, files=None, instance=None,
save_as_new=False, prefix=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.rel.to()
else:
self.instance = instance
self.save_as_new = save_as_new
if queryset is None:
queryset = self.model._default_manager
if self.instance.pk:
qs = queryset.filter(**{self.fk.name: self.instance})
else:
qs = queryset.none()
super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
queryset=qs, **kwargs)
def initial_form_count(self):
if self.save_as_new:
return 0
return super(BaseInlineFormSet, self).initial_form_count()
def _construct_form(self, i, **kwargs):
form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
if self.save_as_new:
# Remove the primary key from the form's data; we are only
# creating new instances.
form.data[form.add_prefix(self._pk_field.name)] = None
# Remove the foreign key from the form's data
form.data[form.add_prefix(self.fk.name)] = None
# Set the fk value here so that the form can do its validation.
setattr(form.instance, self.fk.get_attname(), self.instance.pk)
return form
@classmethod
def get_default_prefix(cls):
from django.db.models.fields.related import RelatedObject
return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+', '')
def save_new(self, form, commit=True):
# Use commit=False so we can assign the parent key afterwards, then
# save the object.
obj = form.save(commit=False)
pk_value = getattr(self.instance, self.fk.rel.field_name)
setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
if commit:
obj.save()
# form.save_m2m() can be called via the formset later on if commit=False
if commit and hasattr(form, 'save_m2m'):
form.save_m2m()
return obj
def add_fields(self, form, index):
super(BaseInlineFormSet, self).add_fields(form, index)
if self._pk_field == self.fk:
name = self._pk_field.name
kwargs = {'pk_field': True}
else:
# The foreign key field might not be on the form, so we poke at the
# Model field to get the label, since we need that for error messages.
name = self.fk.name
kwargs = {
'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
}
if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
kwargs['to_field'] = self.fk.rel.field_name
form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
# Add the generated field to form._meta.fields if it's defined to make
# sure validation isn't skipped on that field.
if form._meta.fields:
if isinstance(form._meta.fields, tuple):
form._meta.fields = list(form._meta.fields)
form._meta.fields.append(self.fk.name)
def get_unique_error_message(self, unique_check):
unique_check = [field for field in unique_check if field != self.fk.name]
return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
"""
Finds and returns the ForeignKey from model to parent if there is one
(returns None if can_fail is True and no such field exists). If fk_name is
provided, assume it is the name of the ForeignKey field. Unless can_fail is
True, an exception is raised if there is no ForeignKey from model to
parent_model.
"""
# avoid circular import
from django.db.models import ForeignKey
opts = model._meta
if fk_name:
fks_to_parent = [f for f in opts.fields if f.name == fk_name]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
if not isinstance(fk, ForeignKey) or \
(fk.rel.to != parent_model and
fk.rel.to not in parent_model._meta.get_parent_list()):
raise Exception("fk_name '%s' is not a ForeignKey to %s" % (fk_name, parent_model))
elif len(fks_to_parent) == 0:
raise Exception("%s has no field named '%s'" % (model, fk_name))
else:
# Try to discover what the ForeignKey from model to parent_model is
fks_to_parent = [
f for f in opts.fields
if isinstance(f, ForeignKey)
and (f.rel.to == parent_model
or f.rel.to in parent_model._meta.get_parent_list())
]
if len(fks_to_parent) == 1:
fk = fks_to_parent[0]
elif len(fks_to_parent) == 0:
if can_fail:
return
raise Exception("%s has no ForeignKey to %s" % (model, parent_model))
else:
raise Exception("%s has more than 1 ForeignKey to %s" % (model, parent_model))
return fk
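# Illustrative sketch (hypothetical ``Author``/``Book`` models where Book has
# a ForeignKey to Author): _get_foreign_key() resolves the link used by inline
# formsets, raising if it is missing or ambiguous.
#
#     fk = _get_foreign_key(Author, Book)                     # the Book.author field
#     fk = _get_foreign_key(Author, Book, fk_name='author')   # explicit name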
def inlineformset_factory(parent_model, model, form=ModelForm,
formset=BaseInlineFormSet, fk_name=None,
fields=None, exclude=None, extra=3, can_order=False,
can_delete=True, max_num=None, formfield_callback=None,
widgets=None, validate_max=False, localized_fields=None,
labels=None, help_texts=None, error_messages=None):
"""
Returns an ``InlineFormSet`` for the given kwargs.
You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
to ``parent_model``.
"""
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
kwargs = {
'form': form,
'formfield_callback': formfield_callback,
'formset': formset,
'extra': extra,
'can_delete': can_delete,
'can_order': can_order,
'fields': fields,
'exclude': exclude,
'max_num': max_num,
'widgets': widgets,
'validate_max': validate_max,
'localized_fields': localized_fields,
'labels': labels,
'help_texts': help_texts,
'error_messages': error_messages,
}
FormSet = modelformset_factory(model, **kwargs)
FormSet.fk = fk
return FormSet
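# Illustrative sketch (hypothetical ``Author``/``Book`` models): the inline
# formset edits Books attached to a single Author instance.
#
#     BookFormSet = inlineformset_factory(Author, Book, fields=('title',))
#     formset = BookFormSet(request.POST, instance=author)
#     if formset.is_valid():
#         formset.save()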
# Fields #####################################################################
class InlineForeignKeyField(Field):
"""
A basic integer field that deals with validating the given value to a
given parent instance in an inline.
"""
widget = HiddenInput
default_error_messages = {
'invalid_choice': _('The inline foreign key did not match the parent instance primary key.'),
}
def __init__(self, parent_instance, *args, **kwargs):
self.parent_instance = parent_instance
self.pk_field = kwargs.pop("pk_field", False)
self.to_field = kwargs.pop("to_field", None)
if self.parent_instance is not None:
if self.to_field:
kwargs["initial"] = getattr(self.parent_instance, self.to_field)
else:
kwargs["initial"] = self.parent_instance.pk
kwargs["required"] = False
super(InlineForeignKeyField, self).__init__(*args, **kwargs)
def clean(self, value):
if value in self.empty_values:
if self.pk_field:
return None
# If there is no value, act as we did before.
return self.parent_instance
# Ensure that we compare the values as equal types.
if self.to_field:
orig = getattr(self.parent_instance, self.to_field)
else:
orig = self.parent_instance.pk
if force_text(value) != force_text(orig):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return self.parent_instance
def _has_changed(self, initial, data):
return False
class ModelChoiceIterator(object):
def __init__(self, field):
self.field = field
self.queryset = field.queryset
def __iter__(self):
if self.field.empty_label is not None:
yield ("", self.field.empty_label)
if self.field.cache_choices:
if self.field.choice_cache is None:
self.field.choice_cache = [
self.choice(obj) for obj in self.queryset.all()
]
for choice in self.field.choice_cache:
yield choice
else:
for obj in self.queryset.all():
yield self.choice(obj)
def __len__(self):
return len(self.queryset) +\
(1 if self.field.empty_label is not None else 0)
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
"""A ChoiceField whose choices are a model QuerySet."""
# This class is a subclass of ChoiceField for purity, but it doesn't
# actually use any of ChoiceField's implementation.
default_error_messages = {
'invalid_choice': _('Select a valid choice. That choice is not one of'
' the available choices.'),
}
def __init__(self, queryset, empty_label="---------", cache_choices=False,
required=True, widget=None, label=None, initial=None,
help_text='', to_field_name=None, *args, **kwargs):
if required and (initial is not None):
self.empty_label = None
else:
self.empty_label = empty_label
self.cache_choices = cache_choices
# Call Field instead of ChoiceField __init__() because we don't need
# ChoiceField.__init__().
Field.__init__(self, required, widget, label, initial, help_text,
*args, **kwargs)
self.queryset = queryset
self.choice_cache = None
self.to_field_name = to_field_name
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
# Need to force a new ModelChoiceIterator to be created, bug #11183
result.queryset = result.queryset
return result
def _get_queryset(self):
return self._queryset
def _set_queryset(self, queryset):
self._queryset = queryset
self.widget.choices = self.choices
queryset = property(_get_queryset, _set_queryset)
# This method will be used to create object labels by the ModelChoiceIterator.
# Override it to customize the label.
def label_from_instance(self, obj):
"""
This method is used to convert objects into strings; it's used to
generate the labels for the choices presented by this object. Subclasses
can override this method to customize the display of the choices.
"""
return smart_text(obj)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, '_choices'):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh ModelChoiceIterator that has not been
# consumed. Note that we're instantiating a new ModelChoiceIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
return ModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def prepare_value(self, value):
if hasattr(value, '_meta'):
if self.to_field_name:
return value.serializable_value(self.to_field_name)
else:
return value.pk
return super(ModelChoiceField, self).prepare_value(value)
def to_python(self, value):
if value in self.empty_values:
return None
try:
key = self.to_field_name or 'pk'
value = self.queryset.get(**{key: value})
except (ValueError, self.queryset.model.DoesNotExist):
raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def validate(self, value):
return Field.validate(self, value)
def _has_changed(self, initial, data):
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return force_text(self.prepare_value(initial_value)) != force_text(data_value)
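# Illustrative sketch (assumes ``from django import forms`` and a hypothetical
# ``Author`` model): ModelChoiceField is normally declared on a form with a
# queryset supplying its choices.
#
#     class BookForm(forms.Form):
#         author = ModelChoiceField(queryset=Author.objects.all(),
#                                   empty_label='(no author)')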
class ModelMultipleChoiceField(ModelChoiceField):
"""A MultipleChoiceField whose choices are a model QuerySet."""
widget = SelectMultiple
hidden_widget = MultipleHiddenInput
default_error_messages = {
'list': _('Enter a list of values.'),
'invalid_choice': _('Select a valid choice. %(value)s is not one of the'
' available choices.'),
'invalid_pk_value': _('"%(pk)s" is not a valid value for a primary key.')
}
def __init__(self, queryset, cache_choices=False, required=True,
widget=None, label=None, initial=None,
help_text='', *args, **kwargs):
super(ModelMultipleChoiceField, self).__init__(queryset, None,
cache_choices, required, widget, label, initial, help_text,
*args, **kwargs)
# Remove this in Django 1.8
if isinstance(self.widget, SelectMultiple) and not isinstance(self.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def clean(self, value):
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
elif not self.required and not value:
return self.queryset.none()
if not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['list'], code='list')
key = self.to_field_name or 'pk'
for pk in value:
try:
self.queryset.filter(**{key: pk})
except ValueError:
raise ValidationError(
self.error_messages['invalid_pk_value'],
code='invalid_pk_value',
params={'pk': pk},
)
qs = self.queryset.filter(**{'%s__in' % key: value})
pks = set(force_text(getattr(o, key)) for o in qs)
for val in value:
if force_text(val) not in pks:
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
# Since this overrides the inherited ModelChoiceField.clean(),
# we run custom validators here.
self.run_validators(value)
return qs
def prepare_value(self, value):
if (hasattr(value, '__iter__') and
not isinstance(value, six.text_type) and
not hasattr(value, '_meta')):
return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
return super(ModelMultipleChoiceField, self).prepare_value(value)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in self.prepare_value(initial))
data_set = set(force_text(value) for value in data)
return data_set != initial_set
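# Illustrative sketch (assumes ``from django import forms`` and a hypothetical
# ``Topic`` model): ModelMultipleChoiceField cleans a list of primary keys into
# a queryset of the selected objects.
#
#     class ArticleForm(forms.Form):
#         topics = ModelMultipleChoiceField(queryset=Topic.objects.all(),
#                                           required=False)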
def modelform_defines_fields(form_class):
return (form_class is not None and (
hasattr(form_class, '_meta') and
(form_class._meta.fields is not None or
form_class._meta.exclude is not None)
))
| bsd-3-clause |
dondeli/php-buildpack-scs | lib/build_pack_utils/runner.py | 42 | 6178 | import os
import os.path
import sys
import tempfile
import subprocess
import logging
# This and check_output are shims to support features of Python 2.7
# on Python 2.6.
#
# This code was borrowed from PyPy 2.7.
# bitbucket.org/pypy/pypy/src/9d88b4875d6e/lib-python/2.7/subprocess.py
#
# This can be removed when the CloudFoundry environment is upgraded
# to Python 2.7 or higher.
#
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode)
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=subprocess.STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def stream_output(*popenargs, **kwargs):
r"""Run command with arguments and stream its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute.
The first argument should be the file like object where the output
should be written. The remainder of the arguments are the same as
for the Popen constructor.
Example:
>>> fp = open('cmd-output.txt', 'wb')
>>> stream_output(fp, ["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> fp = open('cmd-output.txt', 'wb')
>>> stream_output(fp, ["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=subprocess.STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if hasattr(popenargs[0], 'fileno'):
process = subprocess.Popen(stdout=popenargs[0],
*popenargs[1:], **kwargs)
retcode = process.wait()
else:
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs[1:], **kwargs)
for c in iter(lambda: process.stdout.read(1024), ''):
popenargs[0].write(c)
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
class BuildPack(object):
def __init__(self, ctx, url, branch=None, stream=sys.stdout):
self._ctx = ctx
self._url = url
self._branch = branch
self._stream = stream
self.bp_dir = tempfile.mkdtemp(prefix='buildpack')
self._log = logging.getLogger('runner')
def run(self):
if self._url:
self._clone()
self.framework = self._detect()
self._compile()
self.start_yml = self._release()
def _clone(self):
self._log.debug("Cloning [%s] to [%s]", self._url, self.bp_dir)
stream_output(self._stream,
" ".join(['git', 'clone', self._url, self.bp_dir]),
stderr=subprocess.STDOUT,
shell=True)
if self._branch:
self._log.debug("Branching to [%s]", self._branch)
stream_output(self._stream,
" ".join(['git', 'checkout', self._branch]),
stderr=subprocess.STDOUT,
shell=True)
def _detect(self):
self._log.debug("Running detect script")
cmd = [os.path.join(self.bp_dir, 'bin', 'detect'),
self._ctx['BUILD_DIR']]
return check_output(" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True).strip()
def _compile(self):
self._log.debug("Running compile script with build dir [%s] "
"and cache dir [%s]",
self._ctx['BUILD_DIR'],
self._ctx['CACHE_DIR'])
cmd = [os.path.join(self.bp_dir, 'bin', 'compile'),
self._ctx['BUILD_DIR'],
self._ctx['CACHE_DIR']]
stream_output(self._stream,
" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True)
def _release(self):
self._log.debug("Running release script")
cmd = [os.path.join(self.bp_dir, 'bin', 'release'),
self._ctx['BUILD_DIR']]
return check_output(" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True).strip()
| apache-2.0 |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/python/Lib/comtypes/test/test_createwrappers.py | 6 | 3854 | import glob
import os
import unittest
import warnings
import comtypes.typeinfo
import comtypes.client
import comtypes.client._generate
from comtypes.test import requires
requires("typelibs")
# filter warnings about interfaces without a base interface; they will
# be skipped in the code generation.
warnings.filterwarnings("ignore",
"Ignoring interface .* which has no base interface",
UserWarning)
# don't print messages when typelib wrappers are generated
comtypes.client._generate.__verbose__ = False
sysdir = os.path.join(os.environ["SystemRoot"], "system32")
progdir = os.environ["ProgramFiles"]
common_progdir = os.environ["CommonProgramFiles"]
# This test takes quite some time. It tries to build wrappers for ALL
# .dll, .tlb, and .ocx files in the system directory which contain typelibs.
class Test(unittest.TestCase):
def setUp(self):
"Do not write the generated files into the comtypes.gen directory"
comtypes.client.gen_dir = None
def tearDown(self):
comtypes.client.gen_dir = comtypes.client._find_gen_dir()
number = 0
def add_test(fname):
global number
def test(self):
try:
comtypes.typeinfo.LoadTypeLibEx(fname)
except WindowsError:
return
comtypes.client.GetModule(fname)
test.__doc__ = "test GetModule(%r)" % fname
setattr(Test, "test_%d" % number, test)
number += 1
for fname in glob.glob(os.path.join(sysdir, "*.ocx")):
add_test(fname)
for fname in glob.glob(os.path.join(sysdir, "*.tlb")):
add_test(fname)
for fname in glob.glob(os.path.join(progdir, r"Microsoft Office\Office*\*.tlb")):
if os.path.basename(fname).lower() in (
"grde50.olb", # UnicodeEncodeError
"xl5de32.olb", # UnicodeEncodeError
"grde50.olb", # UnicodeEncodeError
):
continue
add_test(fname)
for fname in glob.glob(os.path.join(progdir, r"Microsoft Office\Office*\*.olb")):
if os.path.basename(fname).lower() in (
"grde50.olb", # UnicodeEncodeError
"xl5de32.olb", # UnicodeEncodeError
"grde50.olb", # UnicodeEncodeError
):
continue
add_test(fname)
path = os.path.join(progdir, r"Microsoft Visual Studio .NET 2003\Visual Studio SDKs\DIA SDK\bin\msdia71.dll")
if os.path.isfile(path):
print "ADD", path
add_test(path)
for fname in glob.glob(os.path.join(common_progdir, r"Microsoft Shared\Speech\*.dll")):
add_test(fname)
for fname in glob.glob(os.path.join(sysdir, "*.dll")):
# these typelibs give errors:
if os.path.basename(fname).lower() in (
"syncom.dll", # interfaces without base interface
"msvidctl.dll", # assignment to None
"scardssp.dll", # assertionerror sizeof()
"sccsccp.dll", # assertionerror sizeof()
# Typeinfo in comsvcs.dll in XP 64-bit SP 1 is broken.
# Oleview decompiles this code snippet (^ marks are m):
#[
# odl,
# uuid(C7B67079-8255-42C6-9EC0-6994A3548780)
#]
#interface IAppDomainHelper : IDispatch {
# HRESULT _stdcall pfnShutdownCB(void* pv);
# HRESULT _stdcall Initialize(
# [in] IUnknown* pUnkAD,
# [in] IAppDomainHelper __MIDL_0028,
# ^^^^^^^^^^^^^^^^
# [in] void* pPool);
# HRESULT _stdcall pfnCallbackCB(void* pv);
# HRESULT _stdcall DoCallback(
# [in] IUnknown* pUnkAD,
# [in] IAppDomainHelper __MIDL_0029,
# ^^^^^^^^^^^^^^^^
# [in] void* pPool);
#};
"comsvcs.dll",
):
continue
add_test(fname)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
saneyuki/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/edgechromium.py | 3 | 4302 | from .base import Browser, ExecutorBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from ..webdriver_server import EdgeChromiumDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
from ..executors.executoredgechromium import EdgeChromiumDriverWdspecExecutor # noqa: F401
__wptrunner__ = {"product": "edgechromium",
"check_args": "check_args",
"browser": "EdgeChromiumBrowser",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "EdgeChromiumDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options",
"timeout_multiplier": "get_timeout_multiplier",}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, run_info_data,
**kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["supports_eager_pageload"] = False
capabilities = {
"goog:chromeOptions": {
"prefs": {
"profile": {
"default_content_setting_values": {
"popups": 1
}
}
},
"useAutomationExtension": False,
"excludeSwitches": ["enable-automation"],
"w3c": True
}
}
if test_type == "testharness":
capabilities["pageLoadStrategy"] = "none"
for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
if kwargs[kwarg] is not None:
capabilities["goog:chromeOptions"][capability] = kwargs[kwarg]
if kwargs["headless"]:
if "args" not in capabilities["goog:chromeOptions"]:
capabilities["goog:chromeOptions"]["args"] = []
if "--headless" not in capabilities["goog:chromeOptions"]["args"]:
capabilities["goog:chromeOptions"]["args"].append("--headless")
executor_kwargs["capabilities"] = capabilities
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
return {}
class EdgeChromiumBrowser(Browser):
"""MicrosoftEdge is backed by MSEdgeDriver, which is supplied through
``wptrunner.webdriver.EdgeChromiumDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="msedgedriver",
webdriver_args=None):
"""Creates a new representation of MicrosoftEdge. The `binary` argument gives
the browser binary to use for testing."""
Browser.__init__(self, logger)
self.binary = binary
self.server = EdgeChromiumDriverServer(self.logger,
binary=webdriver_binary,
args=webdriver_args)
def start(self, **kwargs):
self.server.start(block=False)
def stop(self, force=False):
self.server.stop(force=force)
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the driver is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.server.url}
| mpl-2.0 |
alincalinciuc/packstack | tests/test_ospluginutils.py | 12 | 2297 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import TestCase
from test_base import PackstackTestCaseMixin
from packstack.modules.ospluginutils import gethostlist, \
validate_puppet_logfile, \
PackStackError
class OSPluginUtilsTestCase(PackstackTestCaseMixin, TestCase):
def test_gethostlist(self):
conf = {"A_HOST": "1.1.1.1", "B_HOSTS": "2.2.2.2,1.1.1.1",
"C_HOSTS": "3.3.3.3/vdc"}
hosts = gethostlist(conf)
hosts.sort()
self.assertEquals(['1.1.1.1', '2.2.2.2', '3.3.3.3'], hosts)
def test_validate_puppet_logfile(self):
filename = os.path.join(self.tempdir, "puppet.log")
fp = open(filename, "w")
fp.write("Everything went ok")
fp.close()
validate_puppet_logfile(filename)
def test_validate_puppet_logfile_error(self):
filename = os.path.join(self.tempdir, "puppet.log")
fp = open(filename, "w")
fp.write("No matching value for selector param 'Fedora' ...")
fp.close()
self.assertRaises(PackStackError, validate_puppet_logfile, filename)
def test_validate_puppet_logfile_okerror(self):
filename = os.path.join(self.tempdir, "puppet.log")
fp = open(filename, "w")
fp.write("err: Could not prefetch database_grant provider 'mysql': "
"Execution of '/usr/bin/mysql --defaults-file=/root/.my.cnf "
"mysql -Be describe user' returned 1: Could not open required"
" defaults file: /root/.my.cnf")
fp.close()
validate_puppet_logfile(filename)
| apache-2.0 |
kylerbrown/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
pranav01/Xeon_sprout | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
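#
# Example usage (assumed invocation; the event names and options below are
# illustrative, check your perf build):
#   perf record -a -e sched:sched_switch -e sched:sched_migrate_task \
#               -e sched:sched_wakeup -e sched:sched_wakeup_new -- sleep 10
#   perf script -s sched-migration.py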
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
shaufi10/odoo | addons/l10n_be_coda/__init__.py | 430 | 1105 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_be_coda
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
perlygatekeeper/glowing-robot | Little_Alchemy_2/Scraper_python/env/lib/python3.7/site-packages/pip/_internal/__init__.py | 9 | 2797 | #!/usr/bin/env python
from __future__ import absolute_import
import locale
import logging
import os
import warnings
import sys
# 2016-06-17 [email protected]: urllib3 1.14 added optional support for socks,
# but if invoked (i.e. imported), it will issue a warning to stderr if socks
# isn't available. requests unconditionally imports urllib3's socks contrib
# module, triggering this warning. The warning breaks DEP-8 tests (because of
# the stderr output) and is just plain annoying in normal usage. I don't want
# to add socks as yet another dependency for pip, nor do I want to allow-stderr
# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to
# be done before the import of pip.vcs.
from pip._vendor.urllib3.exceptions import DependencyWarning
warnings.filterwarnings("ignore", category=DependencyWarning) # noqa
# We want to inject the use of SecureTransport as early as possible so that any
# references or sessions or what have you are ensured to have it, however we
# only want to do this in the case that we're running on macOS and the linked
# OpenSSL is too old to handle TLSv1.2
try:
import ssl
except ImportError:
pass
else:
# Checks for OpenSSL 1.0.1 on MacOS
if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f:
try:
from pip._vendor.urllib3.contrib import securetransport
except (ImportError, OSError):
pass
else:
securetransport.inject_into_urllib3()
from pip._internal.cli.autocompletion import autocomplete
from pip._internal.cli.main_parser import parse_command
from pip._internal.commands import commands_dict
from pip._internal.exceptions import PipError
from pip._internal.utils import deprecation
from pip._vendor.urllib3.exceptions import InsecureRequestWarning
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWarning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parse_command(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
# Needed for locale.getpreferredencoding(False) to work
# in pip._internal.utils.encoding.auto_decode
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
# setlocale can apparently crash if locale are uninitialized
logger.debug("Ignoring error %s when setting locale", e)
command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args))
return command.main(cmd_args)
| artistic-2.0 |
arifgursel/pyglet | pyglet/window/key.py | 22 | 10707 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Key constants and utilities for pyglet.window.
Usage::
from pyglet.window import Window
from pyglet.window import key
window = Window()
@window.event
def on_key_press(symbol, modifiers):
# Symbolic names:
if symbol == key.RETURN:
# Alphabet keys:
elif symbol == key.Z:
# Number keys:
elif symbol == key._1:
# Number keypad keys:
elif symbol == key.NUM_1:
# Modifiers:
if modifiers & key.MOD_CTRL:
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
class KeyStateHandler(dict):
'''Simple handler that tracks the state of keys on the keyboard. If a
key is pressed then this handler holds a True value for it.
For example::
        >>> win = window.Window()
>>> keyboard = key.KeyStateHandler()
>>> win.push_handlers(keyboard)
# Hold down the "up" arrow...
>>> keyboard[key.UP]
True
>>> keyboard[key.DOWN]
False
'''
def on_key_press(self, symbol, modifiers):
self[symbol] = True
def on_key_release(self, symbol, modifiers):
self[symbol] = False
def __getitem__(self, key):
return self.get(key, False)
def modifiers_string(modifiers):
'''Return a string describing a set of modifiers.
Example::
>>> modifiers_string(MOD_SHIFT | MOD_CTRL)
'MOD_SHIFT|MOD_CTRL'
:Parameters:
`modifiers` : int
Bitwise combination of modifier constants.
:rtype: str
'''
mod_names = []
if modifiers & MOD_SHIFT:
mod_names.append('MOD_SHIFT')
if modifiers & MOD_CTRL:
mod_names.append('MOD_CTRL')
if modifiers & MOD_ALT:
mod_names.append('MOD_ALT')
if modifiers & MOD_CAPSLOCK:
mod_names.append('MOD_CAPSLOCK')
if modifiers & MOD_NUMLOCK:
mod_names.append('MOD_NUMLOCK')
if modifiers & MOD_SCROLLLOCK:
mod_names.append('MOD_SCROLLLOCK')
if modifiers & MOD_COMMAND:
mod_names.append('MOD_COMMAND')
if modifiers & MOD_OPTION:
mod_names.append('MOD_OPTION')
if modifiers & MOD_FUNCTION:
mod_names.append('MOD_FUNCTION')
return '|'.join(mod_names)
def symbol_string(symbol):
'''Return a string describing a key symbol.
Example::
>>> symbol_string(BACKSPACE)
'BACKSPACE'
:Parameters:
`symbol` : int
Symbolic key constant.
:rtype: str
'''
if symbol < 1 << 32:
return _key_names.get(symbol, str(symbol))
else:
return 'user_key(%x)' % (symbol >> 32)
def motion_string(motion):
'''Return a string describing a text motion.
Example::
        >>> motion_string(MOTION_NEXT_WORD)
'MOTION_NEXT_WORD'
:Parameters:
`motion` : int
Text motion constant.
:rtype: str
'''
return _motion_names.get(motion, str(motion))
def user_key(scancode):
'''Return a key symbol for a key not supported by pyglet.
This can be used to map virtual keys or scancodes from unsupported
keyboard layouts into a machine-specific symbol. The symbol will
be meaningless on any other machine, or under a different keyboard layout.
    Applications should use user-keys only when the user explicitly binds them
(for example, mapping keys to actions in a game options screen).
'''
assert scancode > 0
return scancode << 32
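# A minimal sketch of how user_key() might be used (the scancode and handler
# below are hypothetical, not part of pyglet):
#
#   FOO_KEY = user_key(0x1234)        # key the user explicitly bound
#
#   @window.event
#   def on_key_press(symbol, modifiers):
#       if symbol == FOO_KEY:
#           trigger_bound_action()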
# Modifier mask constants
MOD_SHIFT = 1 << 0
MOD_CTRL = 1 << 1
MOD_ALT = 1 << 2
MOD_CAPSLOCK = 1 << 3
MOD_NUMLOCK = 1 << 4
MOD_WINDOWS = 1 << 5
MOD_COMMAND = 1 << 6
MOD_OPTION = 1 << 7
MOD_SCROLLLOCK = 1 << 8
MOD_FUNCTION = 1 << 9
#: Accelerator modifier. On Windows and Linux, this is ``MOD_CTRL``, on
#: Mac OS X it's ``MOD_COMMAND``.
MOD_ACCEL = MOD_CTRL
from pyglet import compat_platform
if compat_platform == 'darwin':
MOD_ACCEL = MOD_COMMAND
# Key symbol constants
# ASCII commands
BACKSPACE = 0xff08
TAB = 0xff09
LINEFEED = 0xff0a
CLEAR = 0xff0b
RETURN = 0xff0d
ENTER = 0xff0d # synonym
PAUSE = 0xff13
SCROLLLOCK = 0xff14
SYSREQ = 0xff15
ESCAPE = 0xff1b
SPACE = 0xff20
# Cursor control and motion
HOME = 0xff50
LEFT = 0xff51
UP = 0xff52
RIGHT = 0xff53
DOWN = 0xff54
PAGEUP = 0xff55
PAGEDOWN = 0xff56
END = 0xff57
BEGIN = 0xff58
# Misc functions
DELETE = 0xffff
SELECT = 0xff60
PRINT = 0xff61
EXECUTE = 0xff62
INSERT = 0xff63
UNDO = 0xff65
REDO = 0xff66
MENU = 0xff67
FIND = 0xff68
CANCEL = 0xff69
HELP = 0xff6a
BREAK = 0xff6b
MODESWITCH = 0xff7e
SCRIPTSWITCH = 0xff7e
FUNCTION = 0xffd2
# Text motion constants: these are allowed to clash with key constants
MOTION_UP = UP
MOTION_RIGHT = RIGHT
MOTION_DOWN = DOWN
MOTION_LEFT = LEFT
MOTION_NEXT_WORD = 1
MOTION_PREVIOUS_WORD = 2
MOTION_BEGINNING_OF_LINE = 3
MOTION_END_OF_LINE = 4
MOTION_NEXT_PAGE = PAGEDOWN
MOTION_PREVIOUS_PAGE = PAGEUP
MOTION_BEGINNING_OF_FILE = 5
MOTION_END_OF_FILE = 6
MOTION_BACKSPACE = BACKSPACE
MOTION_DELETE = DELETE
# Number pad
NUMLOCK = 0xff7f
NUM_SPACE = 0xff80
NUM_TAB = 0xff89
NUM_ENTER = 0xff8d
NUM_F1 = 0xff91
NUM_F2 = 0xff92
NUM_F3 = 0xff93
NUM_F4 = 0xff94
NUM_HOME = 0xff95
NUM_LEFT = 0xff96
NUM_UP = 0xff97
NUM_RIGHT = 0xff98
NUM_DOWN = 0xff99
NUM_PRIOR = 0xff9a
NUM_PAGE_UP = 0xff9a
NUM_NEXT = 0xff9b
NUM_PAGE_DOWN = 0xff9b
NUM_END = 0xff9c
NUM_BEGIN = 0xff9d
NUM_INSERT = 0xff9e
NUM_DELETE = 0xff9f
NUM_EQUAL = 0xffbd
NUM_MULTIPLY = 0xffaa
NUM_ADD = 0xffab
NUM_SEPARATOR = 0xffac
NUM_SUBTRACT = 0xffad
NUM_DECIMAL = 0xffae
NUM_DIVIDE = 0xffaf
NUM_0 = 0xffb0
NUM_1 = 0xffb1
NUM_2 = 0xffb2
NUM_3 = 0xffb3
NUM_4 = 0xffb4
NUM_5 = 0xffb5
NUM_6 = 0xffb6
NUM_7 = 0xffb7
NUM_8 = 0xffb8
NUM_9 = 0xffb9
# Function keys
F1 = 0xffbe
F2 = 0xffbf
F3 = 0xffc0
F4 = 0xffc1
F5 = 0xffc2
F6 = 0xffc3
F7 = 0xffc4
F8 = 0xffc5
F9 = 0xffc6
F10 = 0xffc7
F11 = 0xffc8
F12 = 0xffc9
F13 = 0xffca
F14 = 0xffcb
F15 = 0xffcc
F16 = 0xffcd
F17 = 0xffce
F18 = 0xffcf
F19 = 0xffd0
F20 = 0xffd1
# Modifiers
LSHIFT = 0xffe1
RSHIFT = 0xffe2
LCTRL = 0xffe3
RCTRL = 0xffe4
CAPSLOCK = 0xffe5
LMETA = 0xffe7
RMETA = 0xffe8
LALT = 0xffe9
RALT = 0xffea
LWINDOWS = 0xffeb
RWINDOWS = 0xffec
LCOMMAND = 0xffed
RCOMMAND = 0xffee
LOPTION = 0xffef
ROPTION = 0xfff0
# Latin-1
SPACE = 0x020
EXCLAMATION = 0x021
DOUBLEQUOTE = 0x022
HASH = 0x023
POUND = 0x023 # synonym
DOLLAR = 0x024
PERCENT = 0x025
AMPERSAND = 0x026
APOSTROPHE = 0x027
PARENLEFT = 0x028
PARENRIGHT = 0x029
ASTERISK = 0x02a
PLUS = 0x02b
COMMA = 0x02c
MINUS = 0x02d
PERIOD = 0x02e
SLASH = 0x02f
_0 = 0x030
_1 = 0x031
_2 = 0x032
_3 = 0x033
_4 = 0x034
_5 = 0x035
_6 = 0x036
_7 = 0x037
_8 = 0x038
_9 = 0x039
COLON = 0x03a
SEMICOLON = 0x03b
LESS = 0x03c
EQUAL = 0x03d
GREATER = 0x03e
QUESTION = 0x03f
AT = 0x040
BRACKETLEFT = 0x05b
BACKSLASH = 0x05c
BRACKETRIGHT = 0x05d
ASCIICIRCUM = 0x05e
UNDERSCORE = 0x05f
GRAVE = 0x060
QUOTELEFT = 0x060
A = 0x061
B = 0x062
C = 0x063
D = 0x064
E = 0x065
F = 0x066
G = 0x067
H = 0x068
I = 0x069
J = 0x06a
K = 0x06b
L = 0x06c
M = 0x06d
N = 0x06e
O = 0x06f
P = 0x070
Q = 0x071
R = 0x072
S = 0x073
T = 0x074
U = 0x075
V = 0x076
W = 0x077
X = 0x078
Y = 0x079
Z = 0x07a
BRACELEFT = 0x07b
BAR = 0x07c
BRACERIGHT = 0x07d
ASCIITILDE = 0x07e
_key_names = {}
_motion_names = {}
for _name, _value in locals().copy().items():
if _name[:2] != '__' and _name.upper() == _name and \
not _name.startswith('MOD_'):
if _name.startswith('MOTION_'):
_motion_names[_value] = _name
else:
_key_names[_value] = _name
| bsd-3-clause |
potatolondon/django-nonrel-1-4 | django/contrib/formtools/tests/wizard/wizardtests/forms.py | 313 | 2203 | import os
import tempfile
from django import forms
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import WizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(WizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'all_cleaned_data': self.get_all_cleaned_data(),
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
def get_context_data(self, form, **kwargs):
context = super(ContactWizard, self).get_context_data(form, **kwargs)
if self.storage.current_step == 'form2':
context.update({'another_var': True})
return context
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
UserFormSet = modelformset_factory(User, form=UserForm)
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
| bsd-3-clause |
YKonovalov/guesti | guesti/cloud/common.py | 1 | 1089 | # This file is part of GuestI.
#
# GuestI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GuestI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GuestI. If not, see <http://www.gnu.org/licenses/>.
"""Contains top level info for cloud control modules."""
import abc
__provides__ = [
"ABS_CLOUD"
]
__all__ = __provides__
class ABS_CLOUD(object):
"""Represents an skeleton CLOUD class."""
__metaclass__ = abc.ABCMeta
__args = None
def __init__(self, args=None):
self.__args = args
@abc.abstractmethod
def install(self):
"""Make OS machine image by performing installation in the cloud."""
| gpl-3.0 |
XiaosongWei/blink-crosswalk | Tools/Scripts/webkitpy/thirdparty/coverage/misc.py | 66 | 3720 | """Miscellaneous stuff for Coverage."""
import inspect
from coverage.backward import md5, sorted # pylint: disable=W0622
from coverage.backward import string_class, to_bytes
def nice_pair(pair):
"""Make a nice string representation of a pair of numbers.
If the numbers are equal, just return the number, otherwise return the pair
with a dash between them, indicating the range.
"""
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
def format_lines(statements, lines):
"""Nicely format a list of line numbers.
Format a list of line numbers for printing by coalescing groups of lines as
long as the lines represent consecutive statements. This will coalesce
even if there are gaps between statements.
For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
`lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
"""
pairs = []
i = 0
j = 0
start = None
while i < len(statements) and j < len(lines):
if statements[i] == lines[j]:
if start == None:
start = lines[j]
end = lines[j]
j += 1
elif start:
pairs.append((start, end))
start = None
i += 1
if start:
pairs.append((start, end))
ret = ', '.join(map(nice_pair, pairs))
return ret
def expensive(fn):
"""A decorator to cache the result of an expensive operation.
Only applies to methods with no arguments.
"""
attr = "_cache_" + fn.__name__
def _wrapped(self):
"""Inner fn that checks the cache."""
if not hasattr(self, attr):
setattr(self, attr, fn(self))
return getattr(self, attr)
return _wrapped
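# A minimal sketch of how @expensive might be applied (hypothetical class,
# not part of coverage.py):
#
#   class Analyzer(object):
#       @expensive
#       def arcs(self):
#           return compute_all_arcs()   # computed once, then cached on self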
def bool_or_none(b):
"""Return bool(b), but preserve None."""
if b is None:
return None
else:
return bool(b)
def join_regex(regexes):
"""Combine a list of regexes into one that matches any of them."""
if len(regexes) > 1:
return "(" + ")|(".join(regexes) + ")"
elif regexes:
return regexes[0]
else:
return ""
class Hasher(object):
"""Hashes Python data into md5."""
def __init__(self):
self.md5 = md5()
def update(self, v):
"""Add `v` to the hash, recursively if needed."""
self.md5.update(to_bytes(str(type(v))))
if isinstance(v, string_class):
self.md5.update(to_bytes(v))
elif isinstance(v, (int, float)):
self.update(str(v))
elif isinstance(v, (tuple, list)):
for e in v:
self.update(e)
elif isinstance(v, dict):
keys = v.keys()
for k in sorted(keys):
self.update(k)
self.update(v[k])
else:
for k in dir(v):
if k.startswith('__'):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
def digest(self):
"""Retrieve the digest of the hash."""
return self.md5.digest()
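# A minimal usage sketch for Hasher (illustrative only):
#
#   h = Hasher()
#   h.update({'branch': True, 'timid': False})
#   key = h.digest()    # stable fingerprint of the nested data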
class CoverageException(Exception):
"""An exception specific to Coverage."""
pass
class NoSource(CoverageException):
"""We couldn't find the source for a module."""
pass
class NotPython(CoverageException):
"""A source file turned out not to be parsable Python."""
pass
class ExceptionDuringRun(CoverageException):
"""An exception happened while running customer code.
Construct it with three arguments, the values from `sys.exc_info`.
"""
pass
| bsd-3-clause |
rouault/mapnik | scons/scons-local-2.3.4/SCons/Tool/MSCommon/vs.py | 9 | 19885 | #
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/MSCommon/vs.py 2014/09/27 12:51:43 garyo"
__doc__ = """Module to detect Visual Studio and/or Visual C/C++
"""
import os
import SCons.Errors
import SCons.Util
from common import debug, \
get_output, \
is_win64, \
normalize_env, \
parse_output, \
read_reg
import SCons.Tool.MSCommon.vc
class VisualStudio(object):
"""
An abstract base class for trying to find installed versions of
Visual Studio.
"""
def __init__(self, version, **kw):
self.version = version
kw['vc_version'] = kw.get('vc_version', version)
kw['sdk_version'] = kw.get('sdk_version', version)
self.__dict__.update(kw)
self._cache = {}
#
def find_batch_file(self):
vs_dir = self.get_vs_dir()
if not vs_dir:
            debug('find_batch_file(): no vs_dir')
return None
batch_file = os.path.join(vs_dir, self.batch_file_path)
batch_file = os.path.normpath(batch_file)
if not os.path.isfile(batch_file):
debug('find_batch_file(): %s not on file system' % batch_file)
return None
return batch_file
def find_vs_dir_by_vc(self):
SCons.Tool.MSCommon.vc.get_installed_vcs()
dir = SCons.Tool.MSCommon.vc.find_vc_pdir(self.vc_version)
if not dir:
debug('find_vs_dir(): no installed VC %s' % self.vc_version)
return None
return dir
def find_vs_dir_by_reg(self):
root = 'Software\\'
if is_win64():
root = root + 'Wow6432Node\\'
for key in self.hkeys:
if key=='use_dir':
return self.find_vs_dir_by_vc()
key = root + key
try:
comps = read_reg(key)
except WindowsError, e:
debug('find_vs_dir_by_reg(): no VS registry key %s' % repr(key))
else:
debug('find_vs_dir_by_reg(): found VS in registry: %s' % comps)
return comps
return None
def find_vs_dir(self):
""" Can use registry or location of VC to find vs dir
First try to find by registry, and if that fails find via VC dir
"""
if True:
vs_dir=self.find_vs_dir_by_reg()
return vs_dir
else:
return self.find_vs_dir_by_vc()
def find_executable(self):
vs_dir = self.get_vs_dir()
if not vs_dir:
debug('find_executable(): no vs_dir (%s)'%vs_dir)
return None
executable = os.path.join(vs_dir, self.executable_path)
executable = os.path.normpath(executable)
if not os.path.isfile(executable):
debug('find_executable(): %s not on file system' % executable)
return None
return executable
#
def get_batch_file(self):
try:
return self._cache['batch_file']
except KeyError:
batch_file = self.find_batch_file()
self._cache['batch_file'] = batch_file
return batch_file
def get_executable(self):
try:
debug('get_executable using cache:%s'%self._cache['executable'])
return self._cache['executable']
except KeyError:
executable = self.find_executable()
self._cache['executable'] = executable
debug('get_executable not in cache:%s'%executable)
return executable
def get_vs_dir(self):
try:
return self._cache['vs_dir']
except KeyError:
vs_dir = self.find_vs_dir()
self._cache['vs_dir'] = vs_dir
return vs_dir
def get_supported_arch(self):
try:
return self._cache['supported_arch']
except KeyError:
# RDEVE: for the time being use hardcoded lists
# supported_arch = self.find_supported_arch()
self._cache['supported_arch'] = self.supported_arch
return self.supported_arch
def reset(self):
self._cache = {}
# The list of supported Visual Studio versions we know how to detect.
#
# How to look for .bat file ?
# - VS 2008 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
#          Software\Microsoft\VCExpress\9.0\Setup\VC\productdir
#      * from environment variable VS90COMNTOOLS: the path is then ..\..\VC
#        relative to the path given by the variable.
#
# - VS 2008 Express (WoW6432: 32 bits on windows x64):
#          Software\Wow6432Node\Microsoft\VCExpress\9.0\Setup\VC\productdir
#
# - VS 2005 Express (x86):
# * from registry key productdir, gives the full path to vsvarsall.bat. In
# HKEY_LOCAL_MACHINE):
#          Software\Microsoft\VCExpress\8.0\Setup\VC\productdir
#      * from environment variable VS80COMNTOOLS: the path is then ..\..\VC
#        relative to the path given by the variable.
#
# - VS 2005 Express (WoW6432: 32 bits on windows x64): does not seem to have a
# productdir ?
#
# - VS 2003 .Net (pro edition ? x86):
#      * from registry key productdir. The path is then ..\Common7\Tools\
#        relative to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\7.1\Setup\VC\productdir
#      * from environment variable VS71COMNTOOLS: the path is the full path to
# vsvars32.bat
#
# - VS 98 (VS 6):
#      * from registry key productdir. The path is then Bin
#        relative to the key. The key is in HKEY_LOCAL_MACHINE):
# Software\Microsoft\VisualStudio\6.0\Setup\VC98\productdir
#
# The first version found in the list is the one used by default if
# there are multiple versions installed. Barring good reasons to
# the contrary, this means we should list versions from most recent
# to oldest. Pro versions get listed before Express versions on the
# assumption that, by default, you'd rather use the version you paid
# good money for in preference to whatever Microsoft makes available
# for free.
#
# If you update this list, update _VCVER and _VCVER_TO_PRODUCT_DIR in
# Tool/MSCommon/vc.py, and the MSVC_VERSION documentation in Tool/msvc.xml.
SupportedVSList = [
# Visual Studio 2013
VisualStudio('12.0',
vc_version='12.0',
sdk_version='8.1A',
hkeys=[r'Microsoft\VisualStudio\12.0\Setup\VS\ProductDir'],
common_tools_var='VS120COMNTOOLS',
executable_path=r'Common7\IDE\devenv.com',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86', 'amd64'],
),
# Visual C++ 2013 Express Edition (for Desktop)
VisualStudio('12.0Exp',
vc_version='12.0',
sdk_version='8.1A',
hkeys=[r'Microsoft\VisualStudio\12.0\Setup\VS\ProductDir'],
common_tools_var='VS120COMNTOOLS',
executable_path=r'Common7\IDE\WDExpress.exe',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86', 'amd64'],
),
# Visual Studio 2012
VisualStudio('11.0',
sdk_version='8.0A',
hkeys=[r'Microsoft\VisualStudio\11.0\Setup\VS\ProductDir'],
common_tools_var='VS110COMNTOOLS',
executable_path=r'Common7\IDE\devenv.com',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86', 'amd64'],
),
# Visual C++ 2012 Express Edition (for Desktop)
VisualStudio('11.0Exp',
vc_version='11.0',
sdk_version='8.0A',
hkeys=[r'Microsoft\VisualStudio\11.0\Setup\VS\ProductDir'],
common_tools_var='VS110COMNTOOLS',
executable_path=r'Common7\IDE\WDExpress.exe',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86', 'amd64'],
),
# Visual Studio 2010
VisualStudio('10.0',
sdk_version='7.0A',
hkeys=[r'Microsoft\VisualStudio\10.0\Setup\VS\ProductDir'],
common_tools_var='VS100COMNTOOLS',
executable_path=r'Common7\IDE\devenv.com',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86', 'amd64'],
),
# Visual C++ 2010 Express Edition
VisualStudio('10.0Exp',
vc_version='10.0',
sdk_version='7.0A',
hkeys=[r'Microsoft\VCExpress\10.0\Setup\VS\ProductDir'],
common_tools_var='VS100COMNTOOLS',
executable_path=r'Common7\IDE\VCExpress.exe',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86'],
),
# Visual Studio 2008
VisualStudio('9.0',
sdk_version='6.0A',
hkeys=[r'Microsoft\VisualStudio\9.0\Setup\VS\ProductDir'],
common_tools_var='VS90COMNTOOLS',
executable_path=r'Common7\IDE\devenv.com',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86', 'amd64'],
),
# Visual C++ 2008 Express Edition
VisualStudio('9.0Exp',
vc_version='9.0',
sdk_version='6.0A',
hkeys=[r'Microsoft\VCExpress\9.0\Setup\VS\ProductDir'],
common_tools_var='VS90COMNTOOLS',
executable_path=r'Common7\IDE\VCExpress.exe',
batch_file_path=r'Common7\Tools\vsvars32.bat',
supported_arch=['x86'],
),
# Visual Studio 2005
VisualStudio('8.0',
sdk_version='6.0A',
hkeys=[r'Microsoft\VisualStudio\8.0\Setup\VS\ProductDir'],
common_tools_var='VS80COMNTOOLS',
executable_path=r'Common7\IDE\devenv.com',
batch_file_path=r'Common7\Tools\vsvars32.bat',
default_dirname='Microsoft Visual Studio 8',
supported_arch=['x86', 'amd64'],
),
# Visual C++ 2005 Express Edition
VisualStudio('8.0Exp',
vc_version='8.0Exp',
sdk_version='6.0A',
hkeys=[r'Microsoft\VCExpress\8.0\Setup\VS\ProductDir'],
common_tools_var='VS80COMNTOOLS',
executable_path=r'Common7\IDE\VCExpress.exe',
batch_file_path=r'Common7\Tools\vsvars32.bat',
default_dirname='Microsoft Visual Studio 8',
supported_arch=['x86'],
),
# Visual Studio .NET 2003
VisualStudio('7.1',
sdk_version='6.0',
hkeys=[r'Microsoft\VisualStudio\7.1\Setup\VS\ProductDir'],
common_tools_var='VS71COMNTOOLS',
executable_path=r'Common7\IDE\devenv.com',
batch_file_path=r'Common7\Tools\vsvars32.bat',
default_dirname='Microsoft Visual Studio .NET 2003',
supported_arch=['x86'],
),
# Visual Studio .NET
VisualStudio('7.0',
sdk_version='2003R2',
hkeys=[r'Microsoft\VisualStudio\7.0\Setup\VS\ProductDir'],
common_tools_var='VS70COMNTOOLS',
executable_path=r'IDE\devenv.com',
batch_file_path=r'Common7\Tools\vsvars32.bat',
default_dirname='Microsoft Visual Studio .NET',
supported_arch=['x86'],
),
# Visual Studio 6.0
VisualStudio('6.0',
sdk_version='2003R1',
hkeys=[r'Microsoft\VisualStudio\6.0\Setup\Microsoft Visual Studio\ProductDir',
'use_dir'],
common_tools_var='VS60COMNTOOLS',
executable_path=r'Common\MSDev98\Bin\MSDEV.COM',
batch_file_path=r'Common7\Tools\vsvars32.bat',
default_dirname='Microsoft Visual Studio',
supported_arch=['x86'],
),
]
SupportedVSMap = {}
for vs in SupportedVSList:
SupportedVSMap[vs.version] = vs
# Finding installed versions of Visual Studio isn't cheap, because it
# goes not only to the registry but also to the disk to sanity-check
# that there is, in fact, a Visual Studio directory there and that the
# registry entry isn't just stale. Find this information once, when
# requested, and cache it.
InstalledVSList = None
InstalledVSMap = None
def get_installed_visual_studios():
global InstalledVSList
global InstalledVSMap
if InstalledVSList is None:
InstalledVSList = []
InstalledVSMap = {}
for vs in SupportedVSList:
debug('trying to find VS %s' % vs.version)
if vs.get_executable():
debug('found VS %s' % vs.version)
InstalledVSList.append(vs)
InstalledVSMap[vs.version] = vs
return InstalledVSList
def reset_installed_visual_studios():
global InstalledVSList
global InstalledVSMap
InstalledVSList = None
InstalledVSMap = None
for vs in SupportedVSList:
vs.reset()
# Need to clear installed VC's as well as they are used in finding
# installed VS's
SCons.Tool.MSCommon.vc.reset_installed_vcs()
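# Illustrative sketch (not part of the upstream module): callers typically
# consume the memoized list like this, relying on the registry and on-disk
# probing happening only on the first call:
#
#     for vs in get_installed_visual_studios():
#         print(vs.version, vs.get_executable())
#
# Subsequent calls return the cached InstalledVSList until
# reset_installed_visual_studios() is invoked.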
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
#SDKEnvironmentUpdates = {}
#
#def set_sdk_by_directory(env, sdk_dir):
# global SDKEnvironmentUpdates
# try:
# env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
# except KeyError:
# env_tuple_list = []
# SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
#
# include_path = os.path.join(sdk_dir, 'include')
# mfc_path = os.path.join(include_path, 'mfc')
# atl_path = os.path.join(include_path, 'atl')
#
# if os.path.exists(mfc_path):
# env_tuple_list.append(('INCLUDE', mfc_path))
# if os.path.exists(atl_path):
# env_tuple_list.append(('INCLUDE', atl_path))
# env_tuple_list.append(('INCLUDE', include_path))
#
# env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
# env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
#
# for variable, directory in env_tuple_list:
# env.PrependENVPath(variable, directory)
def msvs_exists():
return (len(get_installed_visual_studios()) > 0)
def get_vs_by_version(msvs):
global InstalledVSMap
global SupportedVSMap
debug('vs.py:get_vs_by_version()')
if msvs not in SupportedVSMap:
msg = "Visual Studio version %s is not supported" % repr(msvs)
raise SCons.Errors.UserError(msg)
get_installed_visual_studios()
vs = InstalledVSMap.get(msvs)
debug('InstalledVSMap:%s'%InstalledVSMap)
debug('vs.py:get_vs_by_version: found vs:%s'%vs)
# Some check like this would let us provide a useful error message
# if they try to set a Visual Studio version that's not installed.
# However, we also want to be able to run tests (like the unit
# tests) on systems that don't, or won't ever, have it installed.
# It might be worth resurrecting this, with some configurable
# setting that the tests can use to bypass the check.
#if not vs:
# msg = "Visual Studio version %s is not installed" % repr(msvs)
# raise SCons.Errors.UserError, msg
return vs
def get_default_version(env):
"""Returns the default version string to use for MSVS.
    If no version was requested by the user through the MSVS environment
    variable, query all the available Visual Studio installations through
    get_installed_visual_studios and take the highest one.
Return
------
version: str
the default version.
"""
if 'MSVS' not in env or not SCons.Util.is_Dict(env['MSVS']):
# get all versions, and remember them for speed later
versions = [vs.version for vs in get_installed_visual_studios()]
env['MSVS'] = {'VERSIONS' : versions}
else:
versions = env['MSVS'].get('VERSIONS', [])
if 'MSVS_VERSION' not in env:
if versions:
env['MSVS_VERSION'] = versions[0] #use highest version by default
else:
debug('get_default_version: WARNING: no installed versions found, '
'using first in SupportedVSList (%s)'%SupportedVSList[0].version)
env['MSVS_VERSION'] = SupportedVSList[0].version
env['MSVS']['VERSION'] = env['MSVS_VERSION']
return env['MSVS_VERSION']
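# Illustrative sketch (assumed construction-environment usage, not part of the
# upstream module): get_default_version() both returns and caches its answer
# on the environment.
#
#     version = get_default_version(env)    # e.g. '9.0'
#     # env['MSVS']['VERSIONS'] now lists every detected version and
#     # env['MSVS_VERSION'] holds the highest one (or the first supported
#     # version as a fallback when nothing is installed).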
def get_default_arch(env):
"""Return the default arch to use for MSVS
if no version was requested by the user through the MSVS_ARCH environment
variable, select x86
Return
------
arch: str
"""
arch = env.get('MSVS_ARCH', 'x86')
msvs = InstalledVSMap.get(env['MSVS_VERSION'])
if not msvs:
arch = 'x86'
    elif arch not in msvs.get_supported_arch():
fmt = "Visual Studio version %s does not support architecture %s"
raise SCons.Errors.UserError(fmt % (env['MSVS_VERSION'], arch))
return arch
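# Illustrative sketch (not part of the upstream module): requesting a 64-bit
# build; an architecture the selected Visual Studio does not support raises
# SCons.Errors.UserError, and an unknown version falls back to 'x86'.
#
#     env['MSVS_VERSION'] = '9.0'
#     env['MSVS_ARCH'] = 'amd64'
#     arch = get_default_arch(env)    # 'amd64' if the installed VS 9.0 supports it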
def merge_default_version(env):
version = get_default_version(env)
arch = get_default_arch(env)
def msvs_setup_env(env):
    version = get_default_version(env)
    arch = get_default_arch(env)
    msvs = get_vs_by_version(version)
    if msvs is None:
        return
    batfilename = msvs.get_batch_file()
# XXX: I think this is broken. This will silently set a bogus tool instead
# of failing, but there is no other way with the current scons tool
# framework
if batfilename is not None:
vars = ('LIB', 'LIBPATH', 'PATH', 'INCLUDE')
msvs_list = get_installed_visual_studios()
vscommonvarnames = [vs.common_tools_var for vs in msvs_list]
save_ENV = env['ENV']
nenv = normalize_env(env['ENV'],
['COMSPEC'] + vscommonvarnames,
force=True)
try:
output = get_output(batfilename, arch, env=nenv)
finally:
env['ENV'] = save_ENV
vars = parse_output(output, vars)
for k, v in vars.items():
env.PrependENVPath(k, v, delete_existing=1)
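# Illustrative sketch (not part of the upstream module): the net effect of
# msvs_setup_env() is equivalent to running the version's vsvars32.bat and
# copying the INCLUDE/LIB/LIBPATH/PATH values it prints into env['ENV'].
#
#     env = Environment(MSVS_VERSION='9.0')
#     msvs_setup_env(env)
#     # env['ENV']['INCLUDE'] now starts with the VC include directories
#     # parsed out of the batch file's output.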
def query_versions():
"""Query the system to get available versions of VS. A version is
considered when a batfile is found."""
msvs_list = get_installed_visual_studios()
versions = [msvs.version for msvs in msvs_list]
return versions
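# Illustrative sketch (not part of the upstream module):
#
#     print(query_versions())   # e.g. ['9.0', '8.0'] on a machine with
#                                # VS 2008 and VS 2005 installed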
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
tlksio/tlksio | env/lib/python3.4/site-packages/django/db/models/base.py | 16 | 75879 | from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection,
connections, router, transaction,
)
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation,
)
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.signals import (
class_prepared, post_init, post_save, pre_init, pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
@python_2_unicode_compatible
class Deferred(object):
def __repr__(self):
return str('<Deferred field>')
def __str__(self):
return str('<Deferred field>')
DEFERRED = Deferred()
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
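# Illustrative sketch (hypothetical SomeModel, not part of the upstream
# module): this mirrors how ModelBase uses the factory and why the __reduce__
# hook matters.
#
#     exc = subclass_exception(str('DoesNotExist'), (ObjectDoesNotExist,),
#                              'myapp.models', attached_to=SomeModel)
#     # pickle.dumps(exc('not found')) works because __reduce__ points the
#     # unpickler back at SomeModel.DoesNotExist rather than the anonymous
#     # dynamically-created class.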
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_attrs = {'__module__': module}
classcell = attrs.pop('__classcell__', None)
if classcell is not None:
new_attrs['__classcell__'] = classcell
new_class = super_new(cls, name, bases, new_attrs)
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes |= set(base.__dict__.keys())
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name." % (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (field.name not in field_names and
field.name not in new_class.__dict__ and
field.name not in inherited_attributes):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
new_class.add_to_class(field.name, copy.deepcopy(field))
if base_meta and base_meta.abstract and not abstract:
new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes]
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added
# to the model at that point.
for index in new_class._meta.indexes:
if not index.name:
index.set_name_with_model(new_class)
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
if not opts.managers or cls._requires_legacy_default_manager():
if any(f.name == 'objects' for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class('objects', manager)
class_prepared.send(sender=cls)
def _requires_legacy_default_manager(cls): # RemovedInDjango20Warning
opts = cls._meta
if opts.manager_inheritance_from_future:
return False
future_default_manager = opts.default_manager
# Step 1: Locate a manager that would have been promoted
# to default manager with the legacy system.
for manager in opts.managers:
originating_model = manager._originating_model
if (cls is originating_model or cls._meta.proxy or
originating_model._meta.abstract):
if manager is not cls._default_manager and not opts.default_manager_name:
warnings.warn(
"Managers from concrete parents will soon qualify as default "
"managers if they appear before any other managers in the "
"MRO. As a result, '{legacy_default_manager}' declared on "
"'{legacy_default_manager_model}' will no longer be the "
"default manager for '{model}' in favor of "
"'{future_default_manager}' declared on "
"'{future_default_manager_model}'. "
"You can redeclare '{legacy_default_manager}' on '{cls}' "
"to keep things the way they are or you can switch to the new "
"behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
legacy_default_manager=manager.name,
legacy_default_manager_model=manager._originating_model._meta.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
opts.default_manager_name = manager.name
opts._expire_cache()
break
# Step 2: Since there are managers but none of them qualified as
# default managers under the legacy system (meaning that there are
# managers from concrete parents that would be promoted under the
# new system), we need to create a new Manager instance for the
# 'objects' attribute as a deprecation shim.
else:
# If the "future" default manager was auto created there is no
# point warning the user since it's basically the same manager.
if not future_default_manager.auto_created:
warnings.warn(
"Managers from concrete parents will soon qualify as "
"default managers. As a result, the 'objects' manager "
"won't be created (or recreated) automatically "
"anymore on '{model}' and '{future_default_manager}' "
"declared on '{future_default_manager_model}' will be "
"promoted to default manager. You can declare "
"explicitly `objects = models.Manager()` on '{cls}' "
"to keep things the way they are or you can switch "
"to the new behavior right away by setting "
"`Meta.manager_inheritance_from_future` to `True`.".format(
cls=cls.__name__,
model=opts.label,
future_default_manager=future_default_manager.name,
future_default_manager_model=future_default_manager._originating_model._meta.label,
),
RemovedInDjango20Warning, 2
)
return True
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(six.with_metaclass(ModelBase)):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
        # The reason for the kwargs check is that the standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
if len(args) > len(opts.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
            # The ordering of the zip calls matters - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
for prop in tuple(kwargs):
try:
# Any remaining kwargs must correspond to properties or
# virtual fields.
if prop in property_names or opts.get_field(prop):
if kwargs[prop] is not _DEFERRED:
_setattr(self, prop, kwargs[prop])
del kwargs[prop]
except (AttributeError, FieldDoesNotExist):
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [values.pop() if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return str('%s object' % self.__class__.__name__)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
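    # Illustrative note (examples assume a hypothetical Book model, not
    # upstream code): identity is based on the concrete model plus primary key.
    #
    #     a = Book.objects.get(pk=1)
    #     b = Book.objects.get(pk=1)
    #     a == b          # True: same concrete model, same pk
    #     hash(Book())    # TypeError: unsaved instances are unhashable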
def __reduce__(self):
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled model instance's Django version %s does not match "
"the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
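    # Illustrative sketch (hypothetical model and field names, not upstream
    # code): deferred fields are whatever concrete columns the query skipped.
    #
    #     book = Book.objects.only('title').get(pk=1)
    #     book.get_deferred_fields()   # e.g. {'author_id', 'published'}
    #
    # Accessing one of those attributes triggers refresh_from_db() below.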
def refresh_from_db(self, using=None, fields=None):
"""
Reloads field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
db_instance_qs = self.__class__._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val or (local_val is None and related_val is None):
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
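    # Illustrative sketch (hypothetical field name, not upstream code):
    # reloading a single column without touching the rest of the instance.
    #
    #     obj.refresh_from_db(fields=['status'])
    #     # issues a SELECT of just that column for obj.pk and rebinds the
    #     # attribute; other fields keep their in-memory values.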
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
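    # Illustrative sketch (hypothetical field names, not upstream code):
    #
    #     article.serializable_value('author')   # returns the author_id value
    #     article.serializable_value('slug')     # returns the attribute as-is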
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
if field.is_relation:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
try:
getattr(self, field.get_cache_name())
except AttributeError:
continue
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
delattr(obj, field.remote_field.get_cache_name())
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
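    # Illustrative sketch (hypothetical field name, not upstream code): the
    # common update_fields fast path, which turns the save into a single
    # UPDATE of the named columns and skips the others entirely.
    #
    #     obj.counter += 1
    #     obj.save(update_fields=['counter'])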
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
The 'raw' argument is telling save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields,
)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None and
getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
update_pk = meta.auto_field and not pk_set
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
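    # Illustrative note (not upstream code): select_on_save trades one extra
    # SELECT for reliable detection on backends that report inaccurate
    # UPDATE rowcounts; it is enabled per model, e.g.:
    #
    #     class Meta:
    #         select_on_save = True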
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self.__class__._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
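    # Illustrative sketch (hypothetical Article model, not upstream code): for
    # unique_together = [('slug', 'site')] and slug declared with
    # unique_for_date='pub_date', the return value is shaped like:
    #
    #     unique_checks == [(Article, ('slug', 'site')), (Article, ('id',))]
    #     date_checks   == [(Article, 'date', 'slug', 'pub_date')]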
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
            # object's values for all the fields in the unique check.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if (lookup_value is None or
(lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
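    # Illustrative sketch (not upstream code): the canonical validation call
    # before a manual save; ValidationError.message_dict maps field names to
    # error lists, with NON_FIELD_ERRORS for clean() failures.
    #
    #     try:
    #         obj.full_clean(exclude=['slug'])
    #     except ValidationError as e:
    #         errors = e.message_dict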
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = (
cls._check_id_field() +
cls._check_field_name_clashes() +
cls._check_model_name_db_lookup_clashes()
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s'." % f.remote_field.through._meta.label,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith('_') or model_name.endswith('_'):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E023'
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E024'
)
)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (
option, field_name,
),
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model '%s'."
% (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id='models.E016',
)
)
return errors
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of strings and do all fields
exist? """
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by only one field).",
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if LOOKUP_SEP not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or non-existent fields in ordering.
invalid_fields = []
        # Any field name that is not present in valid_fields does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % invalid_field,
obj=cls,
id='models.E015',
)
)
return errors
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in settings.DATABASES.keys():
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if f.db_column is None and column_name is not None and len(column_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, six.string_types):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id='models.E019',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(ordered_obj, self):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
'get_%s_order' % model.__name__.lower(),
curry(method_get_order, model)
)
setattr(
related_model,
'set_%s_order' % model.__name__.lower(),
curry(method_set_order, model)
)
########
# MISC #
########
def model_unpickle(model_id):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
| mit |
bmoar/ansible | lib/ansible/plugins/callback/default.py | 46 | 7038 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
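    # Illustrative note (not part of the original source): Ansible selects the
    # stdout callback plugin from configuration; this one is the built-in
    # default. A hypothetical override in ansible.cfg would look like:
    #
    #   [defaults]
    #   stdout_callback = default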
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def v2_runner_on_failed(self, result, ignore_errors=False):
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
self._display.display(msg, color='red')
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
if result._task.ignore_errors:
self._display.display("...ignoring", color='cyan')
def v2_runner_on_ok(self, result):
if result._task.action == 'include':
msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
color = 'cyan'
elif result._result.get('changed', False):
msg = "changed: [%s]" % result._host.get_name()
color = 'yellow'
else:
msg = "ok: [%s]" % result._host.get_name()
color = 'green'
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
self._handle_warnings(result._result)
def v2_runner_on_skipped(self, result):
if C.DISPLAY_SKIPPED_HOSTS:
msg = "skipping: [%s]" % result._host.get_name()
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color='cyan')
def v2_runner_on_unreachable(self, result):
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color='cyan')
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
self._display.banner("TASK [%s]" % task.get_name().strip())
def v2_playbook_on_cleanup_task_start(self, task):
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
def v2_playbook_on_handler_task_start(self, task):
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = "PLAY"
else:
msg = "PLAY [%s]" % name
self._display.banner(msg)
def v2_on_file_diff(self, result):
if 'diff' in result._result and result._result['diff']:
self._display.display(self._get_diff(result._result['diff']))
def v2_playbook_item_on_ok(self, result):
if result._task.action == 'include':
msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
color = 'cyan'
elif result._result.get('changed', False):
msg = "changed: [%s]" % result._host.get_name()
color = 'yellow'
else:
msg = "ok: [%s]" % result._host.get_name()
color = 'green'
msg += " => (item=%s)" % result._result['item']
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
def v2_playbook_item_on_failed(self, result):
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
self._display.display(msg, color='red')
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
self._handle_warnings(result._result)
def v2_playbook_item_on_skipped(self, result):
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item'])
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color='cyan')
| gpl-3.0 |
JCash/voronoi | test/makereport.py | 1 | 8207 | #!/usr/bin/python
import os
import sys
import time
import subprocess
import argparse
from collections import OrderedDict
def find_time_unit(t):
if t < 0.000001:
return 'ns'
if t < 0.001:
return '\xc2\xb5s'
if t < 0.1:
return 'ms'
return 's'
def convert_time(t, unit):
if unit == 'ns':
return t / 1000000000.0
if unit == '\xc2\xb5s':
return t / 1000000.0
if unit == 'ms':
return t / 1000.0
return t
def parse_log(report, reportpath):
counts = []
with open(reportpath, 'rb') as f:
n = -1
iterations = -1
for line in f:
if line.startswith('# n '):
tokens = line.split()
n = int(tokens[2])
iterations = int(tokens[4])
if not n in counts:
counts.append(n)
continue
if line.startswith('#'):
continue
tokens = line.split()
name = tokens[0] # container name
testname = tokens[1]
if not testname in report['timings']:
report['memory'][testname] = OrderedDict()
report['allocations'][testname] = OrderedDict()
report['timings'][testname] = OrderedDict()
if not name in report['timings'][testname]:
report['memory'][testname][name] = list()
report['allocations'][testname][name] = list()
report['timings'][testname][name] = list()
if 'used' == tokens[2]:
memory = int(tokens[3])
allocations = int(tokens[6])
report['memory'][testname][name].append(memory)
report['allocations'][testname][name].append(allocations)
else: # timings
index = tokens.index("min:") # avg, median, min, max
timing = float(tokens[index+1])
unit = tokens[index+2]
timing = convert_time(timing, unit)
report['timings'][testname][name].append(timing)
return counts
def collect_table_data(counts, report, tabledata):
for category, tests in report.iteritems():
for testname, results in tests.iteritems():
if testname in ['title', 'scale', 'unit']:
continue
if not category in tabledata:
tabledata[category] = OrderedDict()
if not testname in tabledata[category]:
tabledata[category][testname] = OrderedDict()
if not 'counts' in tabledata[category][testname]:
tabledata[category][testname]['counts'] = list()
tabledata[category][testname]['counts'].extend(counts)
for name, values in results.iteritems():
if not name in tabledata[category][testname]:
tabledata[category][testname][name] = list()
if name in ['title', 'scale', 'unit']:
tabledata[category][testname][name] = values
continue
tabledata[category][testname][name].extend(values)
def make_table_report(data):
usediff = False
for category, tests in data.iteritems():
totaldiff = 0.0
for testname, results in tests.iteritems():
if testname in ['title', 'scale', 'formatter', 'unit']:
continue
columns = list()
for name, values in results.iteritems():
if len(values) < len(results['counts']):
values.extend( (len(results['counts']) - len(values)) * [0.0])
columns.append( [name]+values )
formatter = tests['formatter']
scale = tests['scale']
title = tests['title']
matrix = zip(*columns)
rows = [list(matrix[0])]
for row in matrix[1:]:
rows.append( [str(row[0])] + map(formatter, map(lambda x: scale * x, row[1:]) ) )
lengths = [0] * len(rows[0])
for row in rows:
for ic, v in enumerate(row):
lengths[ic] = max(lengths[ic], len(v))
# header
headers = []
headersunderline = []
for ic, v in enumerate(rows[0]):
length = lengths[ic]
headers.append( ' ' + v.ljust(length) + ' ' )
if ic == 0:
headersunderline.append( '-' * (length + 1) + ':' )
else:
headersunderline.append( '-' * (length + 2) )
print "## " + title + " " + testname
print ""
print '|' + '|'.join(headers) + '|'
print '|' + '|'.join(headersunderline) + '|'
for row in rows[1:]:
values = []
for ic, v in enumerate(row):
length = lengths[ic]
value = v.ljust(length)
values.append( ' ' + value + ' ')
print '|' + '|'.join(values) + '|',
if not usediff:
print ""
diff = 0.0
if usediff:
tokens = values[-1].split()
diff = float(tokens[0]) - float(values[-2].split()[0])
print diff, tokens[1]
if usediff:
totaldiff += diff
print ""
print ""
if usediff:
print "Total diff:", totaldiff
def make_timings_report(input_path):
report = OrderedDict()
report['timings'] = OrderedDict()
report['memory'] = OrderedDict()
report['allocations'] = OrderedDict()
report['timings']['title'] = 'Timings (microseconds)'
report['timings']['scale'] = 1000000.0
report['timings']['unit'] = 'us'
report['memory']['title'] = 'Memory (kb)'
report['memory']['scale'] = 1 / (1024.0 * 1024.0)
report['memory']['unit'] = 'mb'
report['allocations']['title'] = 'Num Allocations'
report['allocations']['scale'] = 1
report['allocations']['unit'] = ''
counts = parse_log(report, input_path)
tabledata = OrderedDict()
tabledata['timings'] = OrderedDict()
tabledata['memory'] = OrderedDict()
tabledata['allocations'] = OrderedDict()
collect_table_data(counts, report, tabledata)
#del tabledata['memory']
#del tabledata['allocations']
tabledata['timings']['title'] = 'Timings'
tabledata['timings']['scale'] = 1000.0
tabledata['timings']['formatter'] = lambda x: '%.4f ms' % x
if 'memory' in tabledata:
tabledata['memory']['title'] = 'Memory'
tabledata['memory']['scale'] = 1 / 1024.0
tabledata['memory']['formatter'] = lambda x: '%d kb' % x
if 'allocations' in tabledata:
tabledata['allocations']['title'] = 'Num Allocations'
tabledata['allocations']['scale'] = 1
tabledata['allocations']['formatter'] = lambda x: str(x)
# Render to output to table format
make_table_report(tabledata)
def make_file_size_report(path, regex):
print path, regex
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate a timings or file-size report from a test-run log.')
parser.add_argument('-m', '--mode', default='timings', choices=['timings', 'filesize'], help='Enables file size report')
parser.add_argument('--regex', nargs='+', help='Matches to be made')
parser.add_argument('-i', '--input', help='The input file/directory')
args = parser.parse_args()
if not args.input:
print("Need a log file from the test run")
sys.exit(1)
timestart = time.time()
if args.mode == 'filesize':
make_file_size_report(args.input, args.regex)
else:
make_timings_report(args.input)
timeend = time.time()
print "# Report made in %f seconds" % (timeend - timestart)
| mit |
AuyaJackie/odoo | addons/hw_proxy/__openerp__.py | 313 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hardware Proxy',
'version': '1.0',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Connect the Web Client to Hardware Peripherals',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
'author': 'OpenERP SA',
'depends': [],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hyphyphyph/lascaux | instlatte/subsystem.py | 1 | 1214 | # -*- coding: utf-8 -*-
import weakref
from instlatte.logger import logger
logger = logger(__name__)
class Subsystem(object):
manager = None
package_dir = None
module_name = None
name = None
config = dict()
plugins = list()
def __init__(self, manager=None, package_dir=None, module_name=None, subsystem=None):
if not subsystem:
self.manager = weakref.proxy(manager)
self.package_dir = package_dir
self.module_name = module_name
self.config = self.manager.config['subsystems'][module_name] or dict()
else:
self.manager = subsystem.manager
self.package_dir = subsystem.package_dir
self.module_name = subsystem.module_name
self.config = subsystem.config
self.name = self.module_name
self.plugins = list()
def setup(self):
pass
def execute(self, cmd, *args, **kwargs):
if 'exec_%s' % cmd in dir(self):
return getattr(self, 'exec_%s' % cmd)(*args, **kwargs)
return None
def _is_loaded(self):
return Subsystem in self.__class__.__bases__ and True or False
is_loaded = property(_is_loaded)
| mit |
pipsiscool/audacity | lib-src/lv2/serd/waflib/Tools/c.py | 329 | 1066 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib import TaskGen,Task,Utils
from waflib.Tools import c_preproc
from waflib.Tools.ccroot import link_task,stlink_task
@TaskGen.extension('.c')
def c_hook(self,node):
return self.create_compiled_task('c',node)
class c(Task.Task):
run_str='${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${CPPFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
vars=['CCDEPS']
ext_in=['.h']
scan=c_preproc.scan
class cprogram(link_task):
run_str='${LINK_CC} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB}'
ext_out=['.bin']
vars=['LINKDEPS']
inst_to='${BINDIR}'
class cshlib(cprogram):
inst_to='${LIBDIR}'
class cstlib(stlink_task):
pass
| mit |
Blahus/acemod-ACE3 | tools/build.py | 4 | 2348 | #!/usr/bin/env python3
import os
import sys
import subprocess
def mod_time(path):
if not os.path.isdir(path):
return os.path.getmtime(path)
maxi = os.path.getmtime(path)
for p in os.listdir(path):
maxi = max(mod_time(os.path.join(path, p)), maxi)
return maxi
def check_for_changes(addonspath, module):
if not os.path.exists(os.path.join(addonspath, "ace_{}.pbo".format(module))):
return True
return mod_time(os.path.join(addonspath, module)) > mod_time(os.path.join(addonspath, "ace_{}.pbo".format(module)))
def check_for_obsolete_pbos(addonspath, file):
module = file[4:-4]
if not os.path.exists(os.path.join(addonspath, module)):
return True
return False
def main():
print("""
####################
# ACE3 Debug Build #
####################
""")
scriptpath = os.path.realpath(__file__)
projectpath = os.path.dirname(os.path.dirname(scriptpath))
addonspath = os.path.join(projectpath, "addons")
os.chdir(addonspath)
made = 0
failed = 0
skipped = 0
removed = 0
for file in os.listdir(addonspath):
if os.path.isfile(file):
if check_for_obsolete_pbos(addonspath, file):
removed += 1
print(" Removing obsolete file => " + file)
os.remove(file)
print("")
for p in os.listdir(addonspath):
path = os.path.join(addonspath, p)
if not os.path.isdir(path):
continue
if p[0] == ".":
continue
if not check_for_changes(addonspath, p):
skipped += 1
print(" Skipping {}.".format(p))
continue
print("# Making {} ...".format(p))
try:
subprocess.check_output([
"makepbo",
"-NUP",
"-@=z\\addons\\ace\\{}".format(p),
p,
"ace_{}.pbo".format(p)
], stderr=subprocess.STDOUT)
except:
failed += 1
print(" Failed to make {}.".format(p))
else:
made += 1
print(" Successfully made {}.".format(p))
print("\n# Done.")
print(" Made {}, skipped {}, removed {}, failed to make {}.".format(made, skipped, removed, failed))
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
alexras/pylsdj | pylsdj/test_clock.py | 2 | 1373 | import os
from nose.tools import assert_equal
from .project import load_lsdsng
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
def test_read_clocks():
proj = load_lsdsng(
os.path.join(SCRIPT_DIR, 'test_data', 'UNTOLDST.lsdsng'))
project_clock = proj.song.clock
total_clock = proj.song.global_clock
print(project_clock)
print(total_clock)
print((total_clock.checksum))
assert_equal(5, project_clock.hours)
assert_equal(47, project_clock.minutes)
assert_equal(57, total_clock.days)
assert_equal(1, total_clock.hours)
assert_equal(11, total_clock.minutes)
def test_set_local_clock():
proj = load_lsdsng(
os.path.join(SCRIPT_DIR, 'test_data', 'UNTOLDST.lsdsng'))
project_clock = proj.song.clock
project_clock.hours = 2
project_clock.minutes = 22
assert_equal(2, proj.song.clock.hours)
assert_equal(22, proj.song.clock.minutes)
def test_set_global_clock():
proj = load_lsdsng(
os.path.join(SCRIPT_DIR, 'test_data', 'UNTOLDST.lsdsng'))
proj.song.global_clock.days = 5
proj.song.global_clock.hours = 14
proj.song.global_clock.minutes = 20
assert_equal(5, proj.song.global_clock.days)
assert_equal(14, proj.song.global_clock.hours)
assert_equal(20, proj.song.global_clock.minutes)
assert_equal(39, proj.song.global_clock.checksum)
| mit |
aaltinisik/OCBAltinkaya | addons/account_analytic_analysis/__openerp__.py | 262 | 2243 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Contracts Management',
'version': '1.1',
'category': 'Sales Management',
'description': """
This module modifies the account analytic view to show important data to project managers of services companies.
===================================================================================================================
Adds a menu to show relevant information to each manager. You can also view the report of the account analytic summary user-wise as well as month-wise.
""",
'author': 'Camptocamp / Odoo',
'website': 'https://www.odoo.com/page/billing',
'depends': ['hr_timesheet_invoice', 'sale'], #although sale is technically not required to install this module, all menuitems are located under 'Sales' application
'data': [
'security/ir.model.access.csv',
'security/account_analytic_analysis_security.xml',
'account_analytic_analysis_view.xml',
'account_analytic_analysis_cron.xml',
'res_config_view.xml',
'views/account_analytic_analysis.xml',
],
'demo': ['analytic_account_demo.xml'],
'test': ['test/account_analytic_analysis.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
willingc/oh-mainline | vendor/packages/gdata/tests/gdata_tests/health/service_test.py | 127 | 8108 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Eric Bidelman)'
import getpass
import unittest
from gdata import test_data
import gdata.health
import gdata.health.service
username = ''
password = ''
class HealthQueryProfileListTest(unittest.TestCase):
def setUp(self):
self.health = gdata.health.service.HealthService()
self.health.ClientLogin(username, password, source='Health Client Unit Tests')
self.profile_list_feed = self.health.GetProfileListFeed()
def testGetProfileListFeed(self):
self.assert_(isinstance(self.profile_list_feed,
gdata.health.ProfileListFeed))
self.assertEqual(self.profile_list_feed.id.text,
'https://www.google.com/health/feeds/profile/list')
first_entry = self.profile_list_feed.entry[0]
self.assert_(isinstance(first_entry, gdata.health.ProfileListEntry))
self.assert_(first_entry.GetProfileId() is not None)
self.assert_(first_entry.GetProfileName() is not None)
query = gdata.health.service.HealthProfileListQuery()
profile_list = self.health.GetProfileListFeed(query)
self.assertEqual(first_entry.GetProfileId(),
profile_list.entry[0].GetProfileId())
self.assertEqual(profile_list.id.text,
'https://www.google.com/health/feeds/profile/list')
class H9QueryProfileListTest(unittest.TestCase):
def setUp(self):
self.h9 = gdata.health.service.HealthService(use_h9_sandbox=True)
self.h9.ClientLogin(username, password, source='H9 Client Unit Tests')
self.profile_list_feed = self.h9.GetProfileListFeed()
def testGetProfileListFeed(self):
self.assert_(isinstance(self.profile_list_feed,
gdata.health.ProfileListFeed))
self.assertEqual(self.profile_list_feed.id.text,
'https://www.google.com/h9/feeds/profile/list')
first_entry = self.profile_list_feed.entry[0]
self.assert_(isinstance(first_entry, gdata.health.ProfileListEntry))
self.assert_(first_entry.GetProfileId() is not None)
self.assert_(first_entry.GetProfileName() is not None)
query = gdata.health.service.HealthProfileListQuery()
profile_list = self.h9.GetProfileListFeed(query)
self.assertEqual(first_entry.GetProfileId(),
profile_list.entry[0].GetProfileId())
self.assertEqual(profile_list.id.text,
'https://www.google.com/h9/feeds/profile/list')
class HealthQueryProfileTest(unittest.TestCase):
def setUp(self):
self.health = gdata.health.service.HealthService()
self.health.ClientLogin(username, password, source='Health Client Unit Tests')
self.profile_list_feed = self.health.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testGetProfileFeed(self):
feed = self.health.GetProfileFeed(profile_id=self.profile_id)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(isinstance(feed.entry[0].ccr, gdata.health.Ccr))
def testGetProfileFeedByQuery(self):
query = gdata.health.service.HealthProfileQuery(
projection='ui', profile_id=self.profile_id)
feed = self.health.GetProfileFeed(query=query)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(feed.entry[0].ccr is not None)
def testGetProfileDigestFeed(self):
query = gdata.health.service.HealthProfileQuery(
projection='ui', profile_id=self.profile_id,
params={'digest': 'true'})
feed = self.health.GetProfileFeed(query=query)
self.assertEqual(len(feed.entry), 1)
def testGetMedicationsAndConditions(self):
query = gdata.health.service.HealthProfileQuery(
projection='ui', profile_id=self.profile_id,
params={'digest': 'true'}, categories=['medication|condition'])
feed = self.health.GetProfileFeed(query=query)
self.assertEqual(len(feed.entry), 1)
if feed.entry[0].ccr.GetMedications() is not None:
self.assert_(feed.entry[0].ccr.GetMedications()[0] is not None)
self.assert_(feed.entry[0].ccr.GetConditions()[0] is not None)
self.assert_(feed.entry[0].ccr.GetAllergies() is None)
self.assert_(feed.entry[0].ccr.GetAlerts() is None)
self.assert_(feed.entry[0].ccr.GetResults() is None)
class H9QueryProfileTest(unittest.TestCase):
def setUp(self):
self.h9 = gdata.health.service.HealthService(use_h9_sandbox=True)
self.h9.ClientLogin(username, password, source='H9 Client Unit Tests')
self.profile_list_feed = self.h9.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testGetProfileFeed(self):
feed = self.h9.GetProfileFeed(profile_id=self.profile_id)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(feed.entry[0].ccr is not None)
def testGetProfileFeedByQuery(self):
query = gdata.health.service.HealthProfileQuery(
service='h9', projection='ui', profile_id=self.profile_id)
feed = self.h9.GetProfileFeed(query=query)
self.assert_(isinstance(feed, gdata.health.ProfileFeed))
self.assert_(feed.entry[0].ccr is not None)
class HealthNoticeTest(unittest.TestCase):
def setUp(self):
self.health = gdata.health.service.HealthService()
self.health.ClientLogin(username, password, source='Health Client Unit Tests')
self.profile_list_feed = self.health.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testSendNotice(self):
subject_line = 'subject line'
body = 'Notice <b>body</b>.'
ccr_xml = test_data.HEALTH_CCR_NOTICE_PAYLOAD
created_entry = self.health.SendNotice(subject_line,
body,
ccr=ccr_xml,
profile_id=self.profile_id)
self.assertEqual(created_entry.title.text, subject_line)
self.assertEqual(created_entry.content.text, body)
self.assertEqual(created_entry.content.type, 'html')
problem = created_entry.ccr.GetProblems()[0]
problem_desc = problem.FindChildren('Description')[0]
name = problem_desc.FindChildren('Text')[0]
self.assertEqual(name.text, 'Aortic valve disorders')
class H9NoticeTest(unittest.TestCase):
def setUp(self):
self.h9 = gdata.health.service.HealthService(use_h9_sandbox=True)
self.h9.ClientLogin(username, password, source='H9 Client Unit Tests')
self.profile_list_feed = self.h9.GetProfileListFeed()
self.profile_id = self.profile_list_feed.entry[0].GetProfileId()
def testSendNotice(self):
subject_line = 'subject line'
body = 'Notice <b>body</b>.'
ccr_xml = test_data.HEALTH_CCR_NOTICE_PAYLOAD
created_entry = self.h9.SendNotice(subject_line, body, ccr=ccr_xml,
profile_id=self.profile_id)
self.assertEqual(created_entry.title.text, subject_line)
self.assertEqual(created_entry.content.text, body)
self.assertEqual(created_entry.content.type, 'html')
problem = created_entry.ccr.GetProblems()[0]
problem_desc = problem.FindChildren('Description')[0]
name = problem_desc.FindChildren('Text')[0]
self.assertEqual(name.text, 'Aortic valve disorders')
if __name__ == '__main__':
print ('Health API Tests\nNOTE: Please run these tests only with a test '
'account. The tests may delete or update your data.')
username = raw_input('Please enter your username: ')
password = getpass.getpass()
unittest.main()
| agpl-3.0 |
mzhou/lge-kernel-p880-cyanogenmod | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
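# Example invocations (hypothetical command lines; they assume a perf.data
# recorded with the irq/napi/net/skb tracepoints this script consumes):
#   perf script -s netdev-times.py              # show both tx and rx charts
#   perf script -s netdev-times.py tx dev=eth0  # tx chart for eth0 only
#   perf script -s netdev-times.py rx debug     # rx chart plus buffer status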
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
ivano666/tensorflow | tensorflow/python/kernel_tests/fft_ops_test.py | 9 | 8247 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fft operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import xrange # pylint: disable=redefined-builtin
VALID_FFT_RANKS = (1, 2, 3)
class BaseFFTOpsTest(tf.test.TestCase):
def _tfFFT(self, x, rank, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
return self._tfFFTForRank(rank)(x).eval()
def _npFFT(self, x, rank):
if rank == 1:
return np.fft.fft2(x, axes=(-1,))
elif rank == 2:
return np.fft.fft2(x, axes=(-2, -1))
elif rank == 3:
return np.fft.fft2(x, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _tfIFFT(self, x, rank, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
return self._tfIFFTForRank(rank)(x).eval()
def _npIFFT(self, x, rank):
if rank == 1:
return np.fft.ifft2(x, axes=(-1,))
elif rank == 2:
return np.fft.ifft2(x, axes=(-2, -1))
elif rank == 3:
return np.fft.ifft2(x, axes=(-3, -2, -1))
else:
raise ValueError("invalid rank")
def _Compare(self, x, rank):
if tf.test.is_built_with_cuda():
# GPU/Forward
self.assertAllClose(
self._npFFT(x, rank),
self._tfFFT(x, rank, use_gpu=True),
rtol=1e-4,
atol=1e-4)
# GPU/Backward
self.assertAllClose(
self._npIFFT(x, rank),
self._tfIFFT(x, rank, use_gpu=True),
rtol=1e-4,
atol=1e-4)
def _checkGrad(self, func, x, y, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
inx = tf.convert_to_tensor(x)
iny = tf.convert_to_tensor(y)
# func is a forward or inverse FFT function (batched or unbatched)
z = func(tf.complex(inx, iny))
# loss = sum(|z|^2)
loss = tf.reduce_sum(tf.real(z * tf.conj(z)))
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
[inx, iny],
[list(x.shape), list(y.shape)],
loss,
[1],
x_init_value=[x, y],
delta=1e-2)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
class BatchFFTOpsTest(BaseFFTOpsTest):
def _tfFFTForRank(self, rank):
if rank == 1:
return tf.batch_fft
elif rank == 2:
return tf.batch_fft2d
elif rank == 3:
return tf.batch_fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return tf.batch_ifft
elif rank == 2:
return tf.batch_ifft2d
elif rank == 3:
return tf.batch_ifft3d
else:
raise ValueError("invalid rank")
def testEmpty(self):
if tf.test.is_built_with_cuda():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
x = np.zeros((0,) * dims).astype(np.complex64)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
def testBasic(self):
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(
np.mod(
np.arange(np.power(4, dims)), 10).reshape((4,) * dims), rank)
def testRandom(self):
np.random.seed(12345)
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 3):
self._Compare(gen((4,) * dims), rank)
def testError(self):
if tf.test.is_built_with_cuda():
for rank in VALID_FFT_RANKS:
for dims in xrange(0, rank):
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesOpError(
"Input must have rank of at least {}".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesOpError(
"Input must have rank of at least {}".format(rank)):
self._tfIFFT(x, rank)
def testGrad_Simple(self):
if tf.test.is_built_with_cuda():
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.ones(shape=(4,) * dims, dtype=np.float32) / 10.0
im = np.zeros(shape=(4,) * dims, dtype=np.float32)
self._checkGrad(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGrad(self._tfIFFTForRank(rank), re, im, use_gpu=True)
def testGrad_Random(self):
if tf.test.is_built_with_cuda():
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
for dims in xrange(rank, rank + 2):
re = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
im = np.random.rand(*((3,) * dims)).astype(np.float32) * 2 - 1
self._checkGrad(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGrad(self._tfIFFTForRank(rank), re, im, use_gpu=True)
class FFTOpsTest(BaseFFTOpsTest):
def _tfFFTForRank(self, rank):
if rank == 1:
return tf.fft
elif rank == 2:
return tf.fft2d
elif rank == 3:
return tf.fft3d
else:
raise ValueError("invalid rank")
def _tfIFFTForRank(self, rank):
if rank == 1:
return tf.ifft
elif rank == 2:
return tf.ifft2d
elif rank == 3:
return tf.ifft3d
else:
raise ValueError("invalid rank")
def testBasic(self):
for rank in VALID_FFT_RANKS:
self._Compare(
np.mod(
np.arange(np.power(4, rank)), 10).reshape((4,) * rank), rank)
def testRandom(self):
np.random.seed(12345)
def gen(shape):
n = np.prod(shape)
re = np.random.uniform(size=n)
im = np.random.uniform(size=n)
return (re + im * 1j).reshape(shape)
for rank in VALID_FFT_RANKS:
for size in (1, 5, 7, 15):
self._Compare(gen((size,) * rank), rank)
def testEmpty(self):
if tf.test.is_built_with_cuda():
for rank in VALID_FFT_RANKS:
x = np.zeros((0,) * rank).astype(np.complex64)
self.assertEqual(x.shape, self._tfFFT(x, rank).shape)
self.assertEqual(x.shape, self._tfIFFT(x, rank).shape)
def testError(self):
if tf.test.is_built_with_cuda():
for rank in VALID_FFT_RANKS:
for dims in list(range(0, rank)) + [rank + 1]:
x = np.zeros((1,) * dims).astype(np.complex64)
with self.assertRaisesOpError(
"Input must be of rank {} but got:".format(rank)):
self._tfFFT(x, rank)
with self.assertRaisesOpError(
"Input must be of rank {} but got:".format(rank)):
self._tfIFFT(x, rank)
def testGrad_Simple(self):
if tf.test.is_built_with_cuda():
for rank in VALID_FFT_RANKS:
re = np.ones(shape=(4,) * rank, dtype=np.float32) / 10.0
im = np.zeros(shape=(4,) * rank, dtype=np.float32)
self._checkGrad(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGrad(self._tfIFFTForRank(rank), re, im, use_gpu=True)
def testGrad_Random(self):
if tf.test.is_built_with_cuda():
np.random.seed(54321)
for rank in VALID_FFT_RANKS:
re = np.random.rand(*((3,) * rank)).astype(np.float32) * 2 - 1
im = np.random.rand(*((3,) * rank)).astype(np.float32) * 2 - 1
self._checkGrad(self._tfFFTForRank(rank), re, im, use_gpu=True)
self._checkGrad(self._tfIFFTForRank(rank), re, im, use_gpu=True)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
smajohusic/simple-app-with-vue | node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
| mit |
daskol/ml-cipher-cracker | bigram_model.py | 3 | 2620 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
import numpy as np
import math
import random
import metropolis
import random_permutation as rm
import functools as ft
import sgt
ABC = 'abcdefghijklmnopqrstuvwxyz'
PLAIN_TEXT_FILENAME = 'main/oliver_twist.txt'
TRAIN_TEXT_FILENAME = 'main/war_and_peace.txt'
def gen_key(seed=42):
key = range(26)
random.seed(seed)
random.shuffle(key)
return key
def read_text(filename):
with open(filename, 'r') as f:
text = ''.join(c for c in f.read() if c in ABC)
return text
def quality(first, second):
    # float() keeps the error rate from being truncated to 0 by Python 2
    # integer division
    return sum(1 for a, b in zip(first, second) if a != b) / float(len(first))
def encrypt(plain_text, key=gen_key()):
permutation = dict([(ABC[i], ABC[key[i]]) for i in xrange(26)])
return ''.join(permutation[c] for c in plain_text)
def get_statistics(text, num_chars=2):
if num_chars == 1:
stat = dict((a, 0.0) for a in ABC)
elif num_chars == 2:
stat = dict((a + b, 0.0) for a in ABC for b in ABC)
length = len(text)
for i in xrange(0, length / num_chars, num_chars):
letter = text[num_chars * i: num_chars * (i + 1)]
stat[letter] += 1.0
return stat
def get_pdf(key, text, stat, num_chars):
likelihood = 0.0
length = len(text)
if num_chars == 1:
mapping = dict((ABC[i], ABC[key[i]]) for i in xrange(26))
elif num_chars == 2:
mapping = dict((ABC[i] + ABC[j], ABC[key[i]] + ABC[key[j]])
for i in xrange(26) for j in xrange(26))
for i in xrange(0, length / num_chars, num_chars):
letter = text[num_chars * i: num_chars * (i + 1)]
freq = stat[mapping[letter]]
if freq > 0.0:
likelihood += math.log(freq)
else:
return float('-inf')
return likelihood
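# Note (added for clarity): get_pdf returns an *unnormalised* log-likelihood of
# a candidate key, which is all the Metropolis sampler below needs, since
# acceptance depends only on differences of log-likelihoods; a zero frequency
# makes the key impossible, hence the float('-inf') return.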
def test():
num_chars = 1
key = gen_key()
print('[INF] Permutation: ', key)
plain_text = read_text(PLAIN_TEXT_FILENAME)
train_text = read_text(TRAIN_TEXT_FILENAME)
cipher_text = encrypt(plain_text, key)
print('[INF] Texts are read and encrypted.')
stat = get_statistics(train_text, num_chars)
sampler = metropolis.metropolis(
desiredPDF=ft.partial(get_pdf, text=cipher_text, stat=stat, num_chars=num_chars),
initValue=rm.uniform(26),
computableRVS=lambda t: rm.applyedTranspostions(t),
skipIterations=2)
print('[INF] Initialization')
samples = [sampler.next()[0]]
print('[INF] Sampling...')
samples += [sampler.next() for i in xrange(1000)]
for sample in samples:
print(sample)
if __name__ == '__main__':
test() | mit |
adamjmcgrath/glancydesign | django/core/cache/backends/locmem.py | 229 | 4336 | "Thread-safe in-memory cache backend."
import time
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache
from django.utils.synch import RWLock
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
global _caches, _expire_info, _locks
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
self._lock.writer_enters()
try:
exp = self._expire_info.get(key)
if exp is None or exp <= time.time():
try:
self._set(key, pickle.dumps(value), timeout)
return True
except pickle.PickleError:
pass
return False
finally:
self._lock.writer_leaves()
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
self._lock.reader_enters()
try:
exp = self._expire_info.get(key)
if exp is None:
return default
elif exp > time.time():
try:
return pickle.loads(self._cache[key])
except pickle.PickleError:
return default
finally:
self._lock.reader_leaves()
self._lock.writer_enters()
try:
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
finally:
self._lock.writer_leaves()
def _set(self, key, value, timeout=None):
if len(self._cache) >= self._max_entries:
self._cull()
if timeout is None:
timeout = self.default_timeout
self._cache[key] = value
self._expire_info[key] = time.time() + timeout
def set(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
self._lock.writer_enters()
# Python 2.4 doesn't allow combined try-except-finally blocks.
try:
try:
self._set(key, pickle.dumps(value), timeout)
except pickle.PickleError:
pass
finally:
self._lock.writer_leaves()
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
self._lock.reader_enters()
try:
exp = self._expire_info.get(key)
if exp is None:
return False
elif exp > time.time():
return True
finally:
self._lock.reader_leaves()
self._lock.writer_enters()
try:
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
finally:
self._lock.writer_leaves()
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
self._lock.writer_enters()
try:
self._delete(key)
finally:
self._lock.writer_leaves()
def clear(self):
self._cache.clear()
self._expire_info.clear()
# For backwards compatibility
class CacheClass(LocMemCache):
pass
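# Hedged usage sketch (illustration only, not part of the original module).
# With this backend selected in a Django settings module -- in this era of
# Django typically through a CACHE_BACKEND string such as
# "locmem://?max_entries=300" (treat that exact setting as an assumption) --
# application code goes through the generic cache API:
#
#     from django.core.cache import cache
#     cache.set("greeting", "hello", timeout=30)
#     cache.get("greeting")    # -> "hello" until the timeout expires
#     cache.delete("greeting")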
| bsd-3-clause |
nagyistoce/odoo-dev-odoo | addons/l10n_at/__init__.py | 438 | 1050 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) conexus.at
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
2ndQuadrant/ansible | lib/ansible/modules/network/avi/avi_useraccount.py | 10 | 4005 | #!/usr/bin/python
"""
# Created on Aug 12, 2016
#
# @author: Gaurav Rastogi ([email protected]) GitHub ID: grastogi23
#
# module_check: not supported
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_useraccount
author: Chaitanya Deshpande (@chaitanyaavi) <[email protected]>
short_description: Avi UserAccount Module
description:
- This module can be used for updating the password of a user.
- This module is useful for setting up admin password for Controller bootstrap.
version_added: 2.6
requirements: [ avisdk ]
options:
old_password:
description:
- Old password for update password or default password for bootstrap.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Update user password
avi_useraccount:
controller: ""
username: ""
password: new_password
old_password: ""
api_version: ""
- name: Update user password using avi_credentials
avi_useraccount:
avi_credentials: ""
old_password: ""
'''
RETURN = '''
obj:
description: Avi REST resource
returned: success, changed
type: dict
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from copy import deepcopy
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, ansible_return, HAS_AVI)
from avi.sdk.avi_api import ApiSession, AviCredentials
from avi.sdk.utils.ansible_utils import avi_obj_cmp, cleanup_absent_fields
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
old_password=dict(type='str', required=True, no_log=True)
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(argument_spec=argument_specs)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
api_creds = AviCredentials()
api_creds.update_from_ansible_module(module)
password_updated = False
old_password = module.params.get('old_password')
data = {
'old_password': old_password,
'password': api_creds.password
}
password_changed = False
try:
api = ApiSession.get_session(
api_creds.controller, api_creds.username,
password=api_creds.password, timeout=api_creds.timeout,
tenant=api_creds.tenant, tenant_uuid=api_creds.tenant_uuid,
token=api_creds.token, port=api_creds.port)
password_changed = True
return ansible_return(module, None, False, req=data)
except Exception:
pass
if not password_changed:
api = ApiSession.get_session(
api_creds.controller, api_creds.username, password=old_password,
timeout=api_creds.timeout, tenant=api_creds.tenant,
tenant_uuid=api_creds.tenant_uuid, token=api_creds.token,
port=api_creds.port)
rsp = api.put('useraccount', data=data)
if rsp:
return ansible_return(module, rsp, True, req=data)
return module.exit_json(changed=False, obj=data)
if __name__ == '__main__':
main()
| gpl-3.0 |
edx/ansible | v2/ansible/plugins/action/async.py | 16 | 2739 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import random
from ansible import constants as C
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=dict()):
''' transfer the given module name, plus the async module, then run it '''
        # FIXME: noop stuff needs to be sorted out
#if self.runner.noop_on_check(inject):
# return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
if not tmp:
tmp = self._make_tmp_path()
module_name = self._task.action
async_module_path = self._shell.join_path(tmp, 'async_wrapper')
remote_module_path = self._shell.join_path(tmp, module_name)
env_string = self._compute_environment_string()
# configure, upload, and chmod the target module
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=self._task.args)
self._transfer_data(remote_module_path, module_data)
self._remote_chmod(tmp, 'a+rx', remote_module_path)
# configure, upload, and chmod the async_wrapper module
(async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict())
self._transfer_data(async_module_path, async_module_data)
self._remote_chmod(tmp, 'a+rx', async_module_path)
argsfile = self._transfer_data(self._shell.join_path(tmp, 'arguments'), json.dumps(self._task.args))
async_limit = self._task.async
async_jid = str(random.randint(0, 999999999999))
async_cmd = " ".join([str(x) for x in [async_module_path, async_jid, async_limit, remote_module_path, argsfile]])
result = self._low_level_execute_command(cmd=async_cmd, tmp=None)
# clean up after
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
self._remove_tmp_path(tmp)
result['changed'] = True
return result
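# Note (added for clarity): this action only starts the job. async_wrapper
# detaches on the target host and the command above returns a job id right
# away; polling the job to completion is handled elsewhere (for example via
# the async_status module), not by this plugin.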
| gpl-3.0 |
mostaphaRoudsari/Butterfly | butterfly/snappyHexMeshDict.py | 2 | 16257 | # coding=utf-8
"""snappyHexMeshDict class."""
from collections import OrderedDict
import re
from .foamfile import FoamFile, foam_file_from_file
from .utilities import get_snappyHexMesh_geometry_feild, \
get_snappyHexMesh_refinement_surfaces, get_snappyHexMesh_surface_layers
from .refinementRegion import refinement_mode_from_dict
# TODO(mostapha): Move default values into a separate file.
# TODO(mostapha): Add specific methods to access most common values
class SnappyHexMeshDict(FoamFile):
"""Control dict class."""
    # set default values for this class
__default_values = OrderedDict()
__default_values['castellatedMesh'] = 'true'
__default_values['snap'] = 'true'
__default_values['addLayers'] = 'false'
# geometry
__default_values['geometry'] = {}
# castellatedMeshControls
__default_values['castellatedMeshControls'] = OrderedDict()
__default_values['castellatedMeshControls']['maxLocalCells'] = '1000000'
__default_values['castellatedMeshControls']['maxGlobalCells'] = '2000000'
__default_values['castellatedMeshControls']['minRefinementCells'] = '10'
__default_values['castellatedMeshControls']['maxLoadUnbalance'] = '0.10'
__default_values['castellatedMeshControls']['nCellsBetweenLevels'] = '3'
__default_values['castellatedMeshControls']['features'] = '()'
__default_values['castellatedMeshControls']['refinementSurfaces'] = {}
__default_values['castellatedMeshControls']['resolveFeatureAngle'] = '180'
__default_values['castellatedMeshControls']['refinementRegions'] = {}
__default_values['castellatedMeshControls']['locationInMesh'] = '(0 0 0)'
__default_values['castellatedMeshControls']['allowFreeStandingZoneFaces'] = 'true'
# snap controls
__default_values['snapControls'] = OrderedDict()
__default_values['snapControls']['nSmoothPatch'] = '5'
__default_values['snapControls']['tolerance'] = '5'
__default_values['snapControls']['nSolveIter'] = '100'
__default_values['snapControls']['nRelaxIter'] = '8'
__default_values['snapControls']['nFeatureSnapIter'] = '10'
__default_values['snapControls']['extractFeaturesRefineLevel'] = None
__default_values['snapControls']['explicitFeatureSnap'] = None
__default_values['snapControls']['implicitFeatureSnap'] = 'true'
__default_values['snapControls']['multiRegionFeatureSnap'] = 'true'
# layer control
__default_values['addLayersControls'] = OrderedDict()
__default_values['addLayersControls']['relativeSizes'] = 'true'
__default_values['addLayersControls']['layers'] = {}
__default_values['addLayersControls']['expansionRatio'] = '1.0'
__default_values['addLayersControls']['finalLayerThickness'] = '0.3'
__default_values['addLayersControls']['minThickness'] = '0.2'
__default_values['addLayersControls']['nGrow'] = '0'
__default_values['addLayersControls']['featureAngle'] = '110'
__default_values['addLayersControls']['nRelaxIter'] = '3'
__default_values['addLayersControls']['nSmoothSurfaceNormals'] = '1'
__default_values['addLayersControls']['nSmoothThickness'] = '10'
__default_values['addLayersControls']['nSmoothNormals'] = '3'
__default_values['addLayersControls']['maxFaceThicknessRatio'] = '0.5'
__default_values['addLayersControls']['maxThicknessToMedialRatio'] = '0.3'
__default_values['addLayersControls']['minMedianAxisAngle'] = '130'
__default_values['addLayersControls']['nBufferCellsNoExtrude'] = '0'
__default_values['addLayersControls']['nLayerIter'] = '50'
__default_values['addLayersControls']['nRelaxedIter'] = '20'
__default_values['meshQualityControls'] = OrderedDict()
__default_values['meshQualityControls']['maxNonOrtho'] = '60'
__default_values['meshQualityControls']['maxBoundarySkewness'] = '20'
__default_values['meshQualityControls']['maxInternalSkewness'] = '4'
__default_values['meshQualityControls']['maxConcave'] = '80'
__default_values['meshQualityControls']['minFlatness'] = '0.5'
__default_values['meshQualityControls']['minVol'] = '1e-13'
__default_values['meshQualityControls']['minTetQuality'] = '1e-15'
__default_values['meshQualityControls']['minArea'] = '-1'
__default_values['meshQualityControls']['minTwist'] = '0.02'
__default_values['meshQualityControls']['minDeterminant'] = '0.001'
__default_values['meshQualityControls']['minFaceWeight'] = '0.02'
__default_values['meshQualityControls']['minVolRatio'] = '0.01'
__default_values['meshQualityControls']['minTriangleTwist'] = '-1'
__default_values['meshQualityControls']['nSmoothScale'] = '4'
__default_values['meshQualityControls']['errorReduction'] = '0.75'
__default_values['meshQualityControls']['relaxed'] = {'maxNonOrtho': '75'}
__default_values['debug'] = '0'
__default_values['mergeTolerance'] = '1E-6'
__globRefineLevel = (0, 0)
def __init__(self, values=None):
"""Init class."""
FoamFile.__init__(self, name='snappyHexMeshDict', cls='dictionary',
location='system', default_values=self.__default_values,
values=values)
self.__geometries = None
self.__isFeatureEdgeRefinementImplicit = True
self.convertToMeters = 1.0 # This is useful to scale the locationInMesh
@classmethod
def from_file(cls, filepath):
"""Create a FoamFile from a file.
Args:
filepath: Full file path to dictionary.
"""
return cls(values=foam_file_from_file(filepath, cls.__name__))
@classmethod
def from_bf_geometries(cls, project_name, geometries, meshing_parameters=None,
convertToMeters=1, values=None):
"""Create snappyHexMeshDict from HBGeometries."""
_cls = cls(values)
_cls.convertToMeters = convertToMeters
_cls.project_name = project_name
_cls.__geometries = cls._check_input_geometries(geometries)
_cls.update_meshing_parameters(meshing_parameters)
_cls.set_geometry()
_cls.set_refinement_surfaces()
_cls.set_nSurfaceLayers()
return _cls
@property
def project_name(self):
"""Project name."""
return self.__project_name
# TODO(mostapha): updating the name should update refinementSurfaces and setGeometry
    # when it happens from Case.from_file() with no butterfly geometry.
@project_name.setter
def project_name(self, name):
assert re.match("^[a-zA-Z0-9_]*$", name), \
'Invalid project name: "{}".\n' \
            'Do not use whitespace or special characters.'.format(name)
self.__project_name = name
@property
def geometries(self):
"""Butterfly geometries."""
return self.__geometries
@property
def is_featureEdgeRefinement_implicit(self):
"""Return True if implicit feature refinment is used."""
return self.__isFeatureEdgeRefinementImplicit
@property
def locationInMesh(self):
"""A tuple for the location of the volume the should be meshed.
x, y, z values will be multiplied to self.convertToMeters. If the units
are not Meters you can set the convertToMeters using self.convertToMeters
"""
return self.values['castellatedMeshControls']['locationInMesh']
@locationInMesh.setter
def locationInMesh(self, point):
if not point:
point = (0, 0, 0)
try:
x, y, z = tuple(eval(point))
except Exception:
x, y, z = tuple(point)
# scale point based on convertToMeters
point = x * self.convertToMeters, \
y * self.convertToMeters, \
z * self.convertToMeters
self.values['castellatedMeshControls']['locationInMesh'] = \
str(tuple(point)).replace(',', "")
@property
def globRefineLevel(self):
"""A tuple of (min, max) values for global refinment."""
return self.__globRefineLevel
@globRefineLevel.setter
def globRefineLevel(self, r):
self.__globRefineLevel = (0, 0) if not r else tuple(r)
if self.__globRefineLevel:
self.set_refinement_surfaces()
@property
def castellatedMesh(self):
"""Set if castellatedMesh should be ran."""
return self.values['castellatedMesh']
@castellatedMesh.setter
def castellatedMesh(self, value=True):
value = value if isinstance(value, bool) else \
bool(str(value).capitalize())
self.values['castellatedMesh'] = str(value).lower()
@property
def snap(self):
"""Set if snap should be ran."""
return self.values['snap']
@snap.setter
def snap(self, value=True):
value = value if isinstance(value, bool) else \
bool(str(value).capitalize())
self.values['snap'] = str(value).lower()
@property
def addLayers(self):
"""Set if addLayers should be ran."""
return self.values['addLayers']
@addLayers.setter
def addLayers(self, value=True):
value = value if isinstance(value, bool) else \
bool(str(value).capitalize())
self.values['addLayers'] = str(value).lower()
@property
def features(self):
"""Set features for castellatedMeshControls."""
return self.values['castellatedMeshControls']['features']
@features.setter
def features(self, value=None):
value = value or ()
self.values['castellatedMeshControls']['features'] = str(value)
@property
def extractFeaturesRefineLevel(self):
"""A refinment value for extract feature level."""
return self.values['snapControls']['extractFeaturesRefineLevel']
@extractFeaturesRefineLevel.setter
def extractFeaturesRefineLevel(self, value=1):
self.values['snapControls']['extractFeaturesRefineLevel'] = str(int(value))
@property
def nCellsBetweenLevels(self):
"""Number of cells between levels for castellatedMeshControls (default: 3)."""
return self.values['castellatedMeshControls']['nCellsBetweenLevels']
@nCellsBetweenLevels.setter
def nCellsBetweenLevels(self, value=3):
value = value or 3
self.values['castellatedMeshControls']['nCellsBetweenLevels'] = str(int(value))
@property
def maxGlobalCells(self):
"""Number of max global cells for castellatedMeshControls (default: 2000000)."""
return self.values['castellatedMeshControls']['maxGlobalCells']
@maxGlobalCells.setter
def maxGlobalCells(self, value=2000000):
value = value or 2000000
self.values['castellatedMeshControls']['maxGlobalCells'] = str(int(value))
@property
def stl_file_names(self):
"""List of names for stl files if any.
This method doesn't return stl files for refinementRegions. You can use
        self.refinementRegion_names to get the names for refinement regions.
"""
stl_f_names = self.values['geometry'].keys()
return tuple(f[:-4] for f in stl_f_names
if not f[:-4] in self.refinementRegion_names)
@property
def refinementRegions(self):
"""Refinement regions."""
return self.values['castellatedMeshControls']['refinementRegions']
@property
def refinementRegion_names(self):
"""List of stl files if any."""
return self.values['castellatedMeshControls']['refinementRegions'].keys()
def update_meshing_parameters(self, meshing_parameters):
"""Update meshing parameters for blockMeshDict."""
if not meshing_parameters:
return
assert hasattr(meshing_parameters, 'isMeshingParameters'), \
'Expected MeshingParameters not {}'.format(type(meshing_parameters))
if meshing_parameters.locationInMesh:
self.locationInMesh = meshing_parameters.locationInMesh
if meshing_parameters.globRefineLevel:
self.globRefineLevel = meshing_parameters.globRefineLevel
def refinementRegion_mode(self, refinementRegion_name):
"""Refinement region mode for a refinement region."""
assert refinementRegion_name in self.refinementRegion_names, \
'Failed to find {} in {}'.format(refinementRegion_name,
self.refinementRegion_names)
c_mesh_control = self.values['castellatedMeshControls']
mode = c_mesh_control['refinementRegions'][refinementRegion_name]
return refinement_mode_from_dict(mode)
def set_geometry(self):
"""Set geometry from bf_geometries."""
_geoField = get_snappyHexMesh_geometry_feild(self.project_name,
self.geometries,
meshing_type='triSurfaceMesh')
self.values['geometry'].update(_geoField)
def set_refinement_surfaces(self):
"""Set refinement values for geometries."""
_ref = get_snappyHexMesh_refinement_surfaces(self.project_name,
self.geometries,
self.globRefineLevel)
self.values['castellatedMeshControls']['refinementSurfaces'] = _ref
def set_nSurfaceLayers(self):
"""Set number of surface layers for geometries."""
layers = get_snappyHexMesh_surface_layers(self.geometries)
self.values['addLayersControls']['layers'] = layers
def set_featureEdgeRefinement_to_implicit(self):
"""Set meshing snap to implicitFeatureSnap."""
self.values['snapControls']['implicitFeatureSnap'] = 'true'
self.values['snapControls']['multiRegionFeatureSnap'] = 'true'
self.values['snapControls']['explicitFeatureSnap'] = None
self.values['snapControls']['extractFeaturesRefineLevel'] = None
self.values['castellatedMeshControls']['features'] = '()'
self.__isFeatureEdgeRefinementImplicit = True
def set_featureEdgeRefinement_to_explicit(self, file_name, refinement_level=2):
"""Set meshing snap to explicitFeatureSnap.
Args:
file_name: eMesh file name.
refinement_level: extractFeaturesRefineLevel (default: 2)
"""
file_name = file_name.replace('.eMesh', '')
if hasattr(refinement_level, 'levels'):
            # in case it's a distance refinement
refinement_level = refinement_level.levels
else:
refinement_level = refinement_level or 2
self.values['castellatedMeshControls']['features'] = \
'({file "%s.eMesh"; level %s;} )' % (file_name, str(refinement_level))
self.values['snapControls']['implicitFeatureSnap'] = None
self.values['snapControls']['multiRegionFeatureSnap'] = None
self.values['snapControls']['explicitFeatureSnap'] = 'true'
self.values['snapControls']['extractFeaturesRefineLevel'] = 'true'
self.__isFeatureEdgeRefinementImplicit = False
def add_stl_geometry(self, file_name):
"""Add stl geometry to snappyHexMeshDict.
Args:
file_name: Stl file name. This file should be located under
/constant/triSurface.
"""
stl = {'{}.stl'.format(file_name): {'type': 'triSurfaceMesh',
'name': file_name}}
self.values['geometry'].update(stl)
def add_refinementRegion(self, refinementRegion=None):
"""Add refinement region to snappyHexMeshDict."""
if refinementRegion is None:
return
assert hasattr(refinementRegion, 'isRefinementRegion'), \
'{} is not a refinement region.'.format(refinementRegion)
# add geometry to stl
self.add_stl_geometry(refinementRegion.name)
rg = {refinementRegion.name:
refinementRegion.refinement_mode.to_openfoam_dict()}
self.values['castellatedMeshControls']['refinementRegions'].update(rg)
@staticmethod
def _check_input_geometries(geos):
for geo in geos:
assert hasattr(geo, 'isBFMesh'), \
'Expected butterfly.Mesh not {}'.format(geo)
return geos
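# Hedged usage sketch (illustration only, not part of the original module). A
# typical flow builds the dictionary from butterfly geometries and writes it
# into a case's system folder; the project name, geometry list and save() call
# below are assumptions to be checked against the FoamFile base class:
#
#     shm = SnappyHexMeshDict.from_bf_geometries('outdoor_study', geometries)
#     shm.locationInMesh = (0, 0, 1.5)
#     shm.save(project_folder)    # save() is inherited from FoamFile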
| gpl-3.0 |
ovnicraft/edx-platform | common/lib/xmodule/xmodule/tests/test_word_cloud.py | 166 | 1792 | # -*- coding: utf-8 -*-
"""Test for Word cloud Xmodule functional logic."""
from webob.multidict import MultiDict
from xmodule.word_cloud_module import WordCloudDescriptor
from . import LogicTest
class WordCloudModuleTest(LogicTest):
"""Logic tests for Word Cloud Xmodule."""
descriptor_class = WordCloudDescriptor
raw_field_data = {
'all_words': {'cat': 10, 'dog': 5, 'mom': 1, 'dad': 2},
'top_words': {'cat': 10, 'dog': 5, 'dad': 2},
'submitted': False
}
def test_bad_ajax_request(self):
"Make sure that answer for incorrect request is error json"
response = self.ajax_request('bad_dispatch', {})
self.assertDictEqual(response, {
'status': 'fail',
'error': 'Unknown Command!'
})
def test_good_ajax_request(self):
"Make sure that ajax request works correctly"
post_data = MultiDict(('student_words[]', word) for word in ['cat', 'cat', 'dog', 'sun'])
response = self.ajax_request('submit', post_data)
self.assertEqual(response['status'], 'success')
self.assertEqual(response['submitted'], True)
self.assertEqual(response['total_count'], 22)
self.assertDictEqual(
response['student_words'],
{'sun': 1, 'dog': 6, 'cat': 12}
)
self.assertListEqual(
response['top_words'],
[{'text': 'dad', 'size': 2, 'percent': 9.0},
{'text': 'sun', 'size': 1, 'percent': 5.0},
{'text': 'dog', 'size': 6, 'percent': 27.0},
{'text': 'mom', 'size': 1, 'percent': 5.0},
{'text': 'cat', 'size': 12, 'percent': 54.0}]
)
self.assertEqual(
100.0,
sum(i['percent'] for i in response['top_words']))
| agpl-3.0 |
basquith16/EtsyClone | node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 2542 | 1970 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
self.assertFlavor('solaris', 'sunos5' , {});
self.assertFlavor('solaris', 'sunos' , {});
self.assertFlavor('linux' , 'linux2' , {});
self.assertFlavor('linux' , 'linux3' , {});
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
| mit |
itsjustshana/project2 | main.py | 1 | 1149 | """`main` is the top level module for your Flask application."""
# Import the Flask Framework
from flask import Flask
from flask import render_template
app = Flask(__name__)
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def game(name=None):
"""Return a friendly HTTP greeting."""
return render_template('cards.html', name=name)
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.route('/me')
def me(name=None):
""" Return me template at application /me URL."""
return render_template('me.html', name=name)
@app.route('/calc')
def calc(name=None):
""" Return me template at application /calc URL."""
return render_template('calc.html', name=name)
@app.route('/catwalk')
def catwalk(name=None):
""" Return me template at application /calc URL."""
return render_template('catwalk.html', name=name)
@app.route('/card')
def card(name=None):
""" Return me template at application /calc URL."""
return render_template('cards.html', name=name)
| apache-2.0 |
acabey/acabey.github.io | projects/demos/engineering.purdue.edu/scriptingwithobjects/swocode/chap9/MySingleton.py | 1 | 1916 | #!/usr/bin/python
### MySingleton.py
#------------------------- class Singleton --------------------------
class Singleton( object ): #(A)
def __new__( cls, *args, **kwds ): #(B)
# it = cls.__dict__.get( "__it__" ) #(C)
it = None
if (cls.__dict__.has_key("__it__")):
it = cls.__dict__["__it__"] #(C)
if it is not None: #(D)
return it #(E)
cls.__it__ = it = object.__new__( cls ) #(F)
it.init( *args, **kwds ) #(G)
return it #(H)
def init( self, *args, **kwds ): #(I)
pass #(J)
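# Note (added for clarity): __new__ above caches the single instance in the
# class's own __dict__ under "__it__", so every later construction call returns
# that same object, and init() -- not __init__ -- runs only on the first call.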
#------------------------ class MySingleton -------------------------
class MySingleton( Singleton ): #(K)
def init( self, mm ): #(L)
print "init of MySingleton called"
self.m = mm #(M)
# def __init__( self, mm ):
# print "__init__ of MySingleton called"
#---------------------------- Test Code ------------------------------
x = MySingleton( 10 ) #(N)
print x.__class__ # <class '__main__.MySingleton'> #(O)
y = MySingleton( 20 ) #(P)
print x is y # true #(Q)
print x.m # 10 #(R)
print y.m # 10 #(S)
| gpl-3.0 |
cl4rke/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/scripts/modules/rna_info.py | 1 | 22979 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# classes for extracting info from blenders internal classes
import bpy
# use to strip python paths
script_paths = bpy.utils.script_paths()
_FAKE_STRUCT_SUBCLASS = True
def _get_direct_attr(rna_type, attr):
props = getattr(rna_type, attr)
base = rna_type.base
if not base:
return [prop for prop in props]
else:
props_base = getattr(base, attr).values()
return [prop for prop in props if prop not in props_base]
def get_direct_properties(rna_type):
return _get_direct_attr(rna_type, "properties")
def get_direct_functions(rna_type):
return _get_direct_attr(rna_type, "functions")
def rna_id_ignore(rna_id):
if rna_id == "rna_type":
return True
if "_OT_" in rna_id:
return True
if "_MT_" in rna_id:
return True
if "_PT_" in rna_id:
return True
if "_HT_" in rna_id:
return True
if "_KSI_" in rna_id:
return True
return False
def range_str(val):
if val < -10000000:
return "-inf"
elif val > 10000000:
return "inf"
elif type(val) == float:
return '%g' % val
else:
return str(val)
def float_as_string(f):
val_str = "%g" % f
if '.' not in val_str and '-' not in val_str: # value could be 1e-05
val_str += '.0'
return val_str
class InfoStructRNA:
__slots__ = (
"bl_rna",
"identifier",
"name",
"description",
"base",
"nested",
"full_path",
"functions",
"children",
"references",
"properties",
)
global_lookup = {}
def __init__(self, rna_type):
self.bl_rna = rna_type
self.identifier = rna_type.identifier
self.name = rna_type.name
self.description = rna_type.description.strip()
# set later
self.base = None
self.nested = None
self.full_path = ""
self.functions = []
self.children = []
self.references = []
self.properties = []
def build(self):
rna_type = self.bl_rna
parent_id = self.identifier
self.properties[:] = [GetInfoPropertyRNA(rna_prop, parent_id) for rna_prop in get_direct_properties(rna_type) if rna_prop.identifier != "rna_type"]
self.functions[:] = [GetInfoFunctionRNA(rna_prop, parent_id) for rna_prop in get_direct_functions(rna_type)]
def get_bases(self):
bases = []
item = self
while item:
item = item.base
if item:
bases.append(item)
return bases
def get_nested_properties(self, ls=None):
if not ls:
ls = self.properties[:]
if self.nested:
self.nested.get_nested_properties(ls)
return ls
def _get_py_visible_attrs(self):
attrs = []
py_class = getattr(bpy.types, self.identifier)
for attr_str in dir(py_class):
if attr_str.startswith("_"):
continue
attrs.append((attr_str, getattr(py_class, attr_str)))
return attrs
def get_py_properties(self):
properties = []
for identifier, attr in self._get_py_visible_attrs():
if type(attr) is property:
properties.append((identifier, attr))
return properties
def get_py_functions(self):
import types
functions = []
for identifier, attr in self._get_py_visible_attrs():
# methods may be python wrappers to C functions
attr_func = getattr(attr, "__func__", attr)
if type(attr_func) in {types.FunctionType, types.MethodType}:
functions.append((identifier, attr))
return functions
def get_py_c_functions(self):
import types
functions = []
for identifier, attr in self._get_py_visible_attrs():
# methods may be python wrappers to C functions
attr_func = getattr(attr, "__func__", attr)
if type(attr_func) in {types.BuiltinMethodType, types.BuiltinFunctionType}:
functions.append((identifier, attr))
return functions
def __str__(self):
txt = ""
txt += self.identifier
if self.base:
txt += "(%s)" % self.base.identifier
txt += ": " + self.description + "\n"
for prop in self.properties:
txt += prop.__repr__() + "\n"
for func in self.functions:
txt += func.__repr__() + "\n"
return txt
class InfoPropertyRNA:
__slots__ = (
"bl_prop",
"srna",
"identifier",
"name",
"description",
"default_str",
"default",
"enum_items",
"min",
"max",
"array_length",
"collection_type",
"type",
"fixed_type",
"is_argument_optional",
"is_enum_flag",
"is_required",
"is_readonly",
"is_never_none",
)
global_lookup = {}
def __init__(self, rna_prop):
self.bl_prop = rna_prop
self.identifier = rna_prop.identifier
self.name = rna_prop.name
self.description = rna_prop.description.strip()
self.default_str = "<UNKNOWN>"
def build(self):
rna_prop = self.bl_prop
self.enum_items = []
self.min = getattr(rna_prop, "hard_min", -1)
self.max = getattr(rna_prop, "hard_max", -1)
self.array_length = getattr(rna_prop, "array_length", 0)
self.collection_type = GetInfoStructRNA(rna_prop.srna)
self.is_required = rna_prop.is_required
self.is_readonly = rna_prop.is_readonly
self.is_never_none = rna_prop.is_never_none
self.is_argument_optional = rna_prop.is_argument_optional
self.type = rna_prop.type.lower()
fixed_type = getattr(rna_prop, "fixed_type", "")
if fixed_type:
self.fixed_type = GetInfoStructRNA(fixed_type) # valid for pointer/collections
else:
self.fixed_type = None
if self.type == "enum":
self.enum_items[:] = [(item.identifier, item.name, item.description) for item in rna_prop.enum_items]
self.is_enum_flag = rna_prop.is_enum_flag
else:
self.is_enum_flag = False
if self.array_length:
self.default = tuple(getattr(rna_prop, "default_array", ()))
elif self.type == "enum" and self.is_enum_flag:
self.default = getattr(rna_prop, "default_flag", set())
else:
self.default = getattr(rna_prop, "default", None)
self.default_str = "" # fallback
if self.type == "pointer":
# pointer has no default, just set as None
self.default = None
self.default_str = "None"
elif self.type == "string":
self.default_str = "\"%s\"" % self.default
elif self.type == "enum":
if self.is_enum_flag:
self.default_str = "%r" % self.default # repr or set()
else:
self.default_str = "'%s'" % self.default
elif self.array_length:
self.default_str = ''
# special case for floats
if len(self.default) > 0:
if self.type == "float":
self.default_str = "(%s)" % ", ".join(float_as_string(f) for f in self.default)
if not self.default_str:
self.default_str = str(self.default)
else:
if self.type == "float":
self.default_str = float_as_string(self.default)
else:
self.default_str = str(self.default)
self.srna = GetInfoStructRNA(rna_prop.srna) # valid for pointer/collections
def get_arg_default(self, force=True):
default = self.default_str
if default and (force or self.is_required is False):
return "%s=%s" % (self.identifier, default)
return self.identifier
def get_type_description(self, as_ret=False, as_arg=False, class_fmt="%s", collection_id="Collection"):
type_str = ""
if self.fixed_type is None:
type_str += self.type
if self.array_length:
type_str += " array of %d items" % (self.array_length)
if self.type in {"float", "int"}:
type_str += " in [%s, %s]" % (range_str(self.min), range_str(self.max))
elif self.type == "enum":
if self.is_enum_flag:
type_str += " set in {%s}" % ", ".join(("'%s'" % s[0]) for s in self.enum_items)
else:
type_str += " in [%s]" % ", ".join(("'%s'" % s[0]) for s in self.enum_items)
if not (as_arg or as_ret):
# write default property, ignore function args for this
if self.type != "pointer":
if self.default_str:
type_str += ", default %s" % self.default_str
else:
if self.type == "collection":
if self.collection_type:
collection_str = (class_fmt % self.collection_type.identifier) + (" %s of " % collection_id)
else:
collection_str = "%s of " % collection_id
else:
collection_str = ""
type_str += collection_str + (class_fmt % self.fixed_type.identifier)
# setup qualifiers for this value.
type_info = []
if as_ret:
pass
elif as_arg:
if not self.is_required:
type_info.append("optional")
if self.is_argument_optional:
type_info.append("optional argument")
else: # readonly is only useful for self's, not args
if self.is_readonly:
type_info.append("readonly")
if self.is_never_none:
type_info.append("never None")
if type_info:
type_str += (", (%s)" % ", ".join(type_info))
return type_str
def __str__(self):
txt = ""
txt += " * " + self.identifier + ": " + self.description
return txt
class InfoFunctionRNA:
__slots__ = (
"bl_func",
"identifier",
"description",
"args",
"return_values",
"is_classmethod",
)
global_lookup = {}
def __init__(self, rna_func):
self.bl_func = rna_func
self.identifier = rna_func.identifier
# self.name = rna_func.name # functions have no name!
self.description = rna_func.description.strip()
self.is_classmethod = not rna_func.use_self
self.args = []
self.return_values = ()
def build(self):
rna_func = self.bl_func
parent_id = rna_func
self.return_values = []
for rna_prop in rna_func.parameters.values():
prop = GetInfoPropertyRNA(rna_prop, parent_id)
if rna_prop.is_output:
self.return_values.append(prop)
else:
self.args.append(prop)
self.return_values = tuple(self.return_values)
def __str__(self):
txt = ''
txt += ' * ' + self.identifier + '('
for arg in self.args:
txt += arg.identifier + ', '
txt += '): ' + self.description
return txt
class InfoOperatorRNA:
__slots__ = (
"bl_op",
"identifier",
"name",
"module_name",
"func_name",
"description",
"args",
)
global_lookup = {}
def __init__(self, rna_op):
self.bl_op = rna_op
self.identifier = rna_op.identifier
mod, name = self.identifier.split("_OT_", 1)
self.module_name = mod.lower()
self.func_name = name
# self.name = rna_func.name # functions have no name!
self.description = rna_op.description.strip()
self.args = []
def build(self):
rna_op = self.bl_op
parent_id = self.identifier
for rna_id, rna_prop in rna_op.properties.items():
if rna_id == "rna_type":
continue
prop = GetInfoPropertyRNA(rna_prop, parent_id)
self.args.append(prop)
def get_location(self):
op_class = getattr(bpy.types, self.identifier)
op_func = getattr(op_class, "execute", None)
if op_func is None:
op_func = getattr(op_class, "invoke", None)
if op_func is None:
op_func = getattr(op_class, "poll", None)
if op_func:
op_code = op_func.__code__
source_path = op_code.co_filename
# clear the prefix
for p in script_paths:
source_path = source_path.split(p)[-1]
if source_path[0] in "/\\":
source_path = source_path[1:]
return source_path, op_code.co_firstlineno
else:
return None, None
def _GetInfoRNA(bl_rna, cls, parent_id=""):
if bl_rna is None:
return None
key = parent_id, bl_rna.identifier
try:
return cls.global_lookup[key]
except KeyError:
instance = cls.global_lookup[key] = cls(bl_rna)
return instance
def GetInfoStructRNA(bl_rna):
return _GetInfoRNA(bl_rna, InfoStructRNA)
def GetInfoPropertyRNA(bl_rna, parent_id):
return _GetInfoRNA(bl_rna, InfoPropertyRNA, parent_id)
def GetInfoFunctionRNA(bl_rna, parent_id):
return _GetInfoRNA(bl_rna, InfoFunctionRNA, parent_id)
def GetInfoOperatorRNA(bl_rna):
return _GetInfoRNA(bl_rna, InfoOperatorRNA)
def BuildRNAInfo():
# Use for faster lookups
# use rna_struct.identifier as the key for each dict
rna_struct_dict = {} # store identifier:rna lookups
rna_full_path_dict = {} # store the result of full_rna_struct_path(rna_struct)
rna_children_dict = {} # store all rna_structs nested from here
rna_references_dict = {} # store a list of rna path strings that reference this type
# rna_functions_dict = {} # store all functions directly in this type (not inherited)
def full_rna_struct_path(rna_struct):
"""
Needed when referencing one struct from another
"""
nested = rna_struct.nested
if nested:
return "%s.%s" % (full_rna_struct_path(nested), rna_struct.identifier)
else:
return rna_struct.identifier
# def write_func(rna_func, ident):
def base_id(rna_struct):
try:
return rna_struct.base.identifier
except:
return "" # invalid id
#structs = [(base_id(rna_struct), rna_struct.identifier, rna_struct) for rna_struct in bpy.doc.structs.values()]
'''
structs = []
for rna_struct in bpy.doc.structs.values():
structs.append( (base_id(rna_struct), rna_struct.identifier, rna_struct) )
'''
structs = []
for rna_type_name in dir(bpy.types):
rna_type = getattr(bpy.types, rna_type_name)
rna_struct = getattr(rna_type, "bl_rna", None)
if rna_struct:
#if not rna_type_name.startswith('__'):
identifier = rna_struct.identifier
if not rna_id_ignore(identifier):
structs.append((base_id(rna_struct), identifier, rna_struct))
# Simple lookup
rna_struct_dict[identifier] = rna_struct
# Store full rna path 'GameObjectSettings' -> 'Object.GameObjectSettings'
rna_full_path_dict[identifier] = full_rna_struct_path(rna_struct)
# Store a list of functions, remove inherited later
# NOT USED YET
## rna_functions_dict[identifier] = get_direct_functions(rna_struct)
# fill in these later
rna_children_dict[identifier] = []
rna_references_dict[identifier] = []
else:
print("Ignoring", rna_type_name)
structs.sort() # not needed but speeds up sort below, setting items without an inheritance first
# Arrange so classes are always defined in the correct order
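    # (Clarifying note) The loop below is effectively a topological sort done by
    # repeated passes: whenever a struct appears before its base, it is moved to
    # just after that base and the scan restarts, until a full pass needs no
    # moves.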
deps_ok = False
while deps_ok is False:
deps_ok = True
rna_done = set()
for i, (rna_base, identifier, rna_struct) in enumerate(structs):
rna_done.add(identifier)
if rna_base and rna_base not in rna_done:
deps_ok = False
data = structs.pop(i)
ok = False
while i < len(structs):
if structs[i][1] == rna_base:
structs.insert(i + 1, data) # insert after the item we depend on.
ok = True
break
i += 1
if not ok:
print('Dependancy "%s" could not be found for "%s"' % (identifier, rna_base))
break
# Done ordering structs
# precalculate vars to avoid a lot of looping
for (rna_base, identifier, rna_struct) in structs:
# rna_struct_path = full_rna_struct_path(rna_struct)
rna_struct_path = rna_full_path_dict[identifier]
for rna_prop in get_direct_properties(rna_struct):
rna_prop_identifier = rna_prop.identifier
if rna_prop_identifier == 'RNA' or rna_id_ignore(rna_prop_identifier):
continue
for rna_prop_ptr in (getattr(rna_prop, "fixed_type", None), getattr(rna_prop, "srna", None)):
# Does this property point to me?
if rna_prop_ptr:
rna_references_dict[rna_prop_ptr.identifier].append("%s.%s" % (rna_struct_path, rna_prop_identifier))
for rna_func in get_direct_functions(rna_struct):
for rna_prop_identifier, rna_prop in rna_func.parameters.items():
if rna_prop_identifier == 'RNA' or rna_id_ignore(rna_prop_identifier):
continue
rna_prop_ptr = getattr(rna_prop, "fixed_type", None)
# Does this property point to me?
if rna_prop_ptr:
rna_references_dict[rna_prop_ptr.identifier].append("%s.%s" % (rna_struct_path, rna_func.identifier))
# Store nested children
nested = rna_struct.nested
if nested:
rna_children_dict[nested.identifier].append(rna_struct)
# Sort the refs, just reads nicer
for rna_refs in rna_references_dict.values():
rna_refs.sort()
info_structs = []
for (rna_base, identifier, rna_struct) in structs:
#if rna_struct.nested:
# continue
#write_struct(rna_struct, '')
info_struct = GetInfoStructRNA(rna_struct)
if rna_base:
info_struct.base = GetInfoStructRNA(rna_struct_dict[rna_base])
info_struct.nested = GetInfoStructRNA(rna_struct.nested)
info_struct.children[:] = rna_children_dict[identifier]
info_struct.references[:] = rna_references_dict[identifier]
info_struct.full_path = rna_full_path_dict[identifier]
info_structs.append(info_struct)
for rna_info_prop in InfoPropertyRNA.global_lookup.values():
rna_info_prop.build()
for rna_info_prop in InfoFunctionRNA.global_lookup.values():
rna_info_prop.build()
for rna_info in InfoStructRNA.global_lookup.values():
rna_info.build()
for prop in rna_info.properties:
prop.build()
for func in rna_info.functions:
func.build()
for prop in func.args:
prop.build()
for prop in func.return_values:
prop.build()
# there are too many invalid defaults, unless we intend to fix, leave this off
if 0:
for rna_info in InfoStructRNA.global_lookup.values():
for prop in rna_info.properties:
# ERROR CHECK
default = prop.default
if type(default) in {float, int}:
if default < prop.min or default > prop.max:
print("\t %s.%s, %s not in [%s - %s]" % (rna_info.identifier, prop.identifier, default, prop.min, prop.max))
# now for operators
op_mods = dir(bpy.ops)
for op_mod_name in sorted(op_mods):
if op_mod_name.startswith('__'):
continue
op_mod = getattr(bpy.ops, op_mod_name)
operators = dir(op_mod)
for op in sorted(operators):
try:
rna_prop = getattr(op_mod, op).get_rna()
except AttributeError:
rna_prop = None
except TypeError:
rna_prop = None
if rna_prop:
GetInfoOperatorRNA(rna_prop.bl_rna)
for rna_info in InfoOperatorRNA.global_lookup.values():
rna_info.build()
for rna_prop in rna_info.args:
rna_prop.build()
#for rna_info in InfoStructRNA.global_lookup.values():
# print(rna_info)
return InfoStructRNA.global_lookup, InfoFunctionRNA.global_lookup, InfoOperatorRNA.global_lookup, InfoPropertyRNA.global_lookup
if __name__ == "__main__":
import rna_info
struct = rna_info.BuildRNAInfo()[0]
data = []
for struct_id, v in sorted(struct.items()):
struct_id_str = v.identifier #~ "".join(sid for sid in struct_id if struct_id)
for base in v.get_bases():
struct_id_str = base.identifier + "|" + struct_id_str
props = [(prop.identifier, prop) for prop in v.properties]
for prop_id, prop in sorted(props):
# if prop.type == "boolean":
# continue
prop_type = prop.type
if prop.array_length > 0:
prop_type += "[%d]" % prop.array_length
data.append("%s.%s -> %s: %s%s %s" % (struct_id_str, prop.identifier, prop.identifier, prop_type, ", (read-only)" if prop.is_readonly else "", prop.description))
data.sort()
if bpy.app.background:
import sys
sys.stderr.write("\n".join(data))
sys.stderr.write("\n\nEOF\n")
else:
text = bpy.data.texts.new(name="api.py")
text.from_string(data)
| gpl-3.0 |
markgw/pimlico | src/python/pimlico/old_datatypes/modules/features/term_feature_matrix_builder/execute.py | 1 | 1433 | # This file is part of Pimlico
# Copyright (C) 2020 Mark Granroth-Wilding
# Licensed under the GNU LGPL v3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html
import numpy
from scipy.sparse.dok import dok_matrix
from pimlico.core.modules.base import BaseModuleExecutor
from pimlico.old_datatypes.arrays import ScipySparseMatrixWriter
from pimlico.utils.progress import get_progress_bar
class ModuleExecutor(BaseModuleExecutor):
def execute(self):
input_data = self.info.get_input("data")
self.log.info("Collecting features into a %d x %d sparse matrix from %d data points" %
(len(input_data.term_dictionary), len(input_data.feature_dictionary), len(input_data)))
pbar = get_progress_bar(len(input_data), title="Collecting")
matrix = dok_matrix((len(input_data.term_dictionary), len(input_data.feature_dictionary)), dtype=numpy.int32)
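        # Note (added for clarity): a dok_matrix is chosen for the accumulation
        # stage because it supports cheap incremental updates to individual
        # (term, feature) cells; it is converted to COO format by the writer below.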
# Iterate over the input data and collect up counts from all instances of each term
for term, feature_counts in pbar(input_data):
for feature, count in feature_counts.items():
matrix[term, feature] += count
# Write out the matrix
self.log.info("Built matrix: writing to disk")
with ScipySparseMatrixWriter(self.info.get_absolute_output_dir("matrix")) as writer:
# Matrix will be converted to COO format before writing
writer.set_matrix(matrix)
| gpl-3.0 |
IanSav/enigma2 | lib/python/Screens/Ci.py | 2 | 18127 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Tools.BoundFunction import boundFunction
from Components.Sources.StaticText import StaticText
from Components.ActionMap import ActionMap
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigSubList, getConfigListEntry, KEY_LEFT, KEY_RIGHT, KEY_0, ConfigNothing, ConfigPIN, ConfigYesNo, NoSave
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.SystemInfo import SystemInfo
from enigma import eTimer, eDVBCI_UI, eDVBCIInterfaces
import Screens.Standby
forceNotShowCiMessages = False
def setCIBitrate(configElement):
eDVBCI_UI.getInstance().setClockRate(configElement.slotid, eDVBCI_UI.rateNormal if configElement.value == "no" else eDVBCI_UI.rateHigh)
def setdvbCiDelay(configElement):
open(SystemInfo["CommonInterfaceCIDelay"], "w").write(configElement.value)
configElement.save()
def setRelevantPidsRouting(configElement):
open(SystemInfo["CI%dRelevantPidsRoutingSupport" % configElement.slotid], "w").write("yes" if configElement.value else "no")
def InitCiConfig():
config.ci = ConfigSubList()
config.cimisc = ConfigSubsection()
if SystemInfo["CommonInterface"]:
for slot in range(SystemInfo["CommonInterface"]):
config.ci.append(ConfigSubsection())
config.ci[slot].canDescrambleMultipleServices = ConfigSelection(choices = [("auto", _("auto")), ("no", _("no")), ("yes", _("yes"))], default = "auto")
config.ci[slot].use_static_pin = ConfigYesNo(default = True)
config.ci[slot].static_pin = ConfigPIN(default = 0)
config.ci[slot].show_ci_messages = ConfigYesNo(default = True)
if SystemInfo["CI%dSupportsHighBitrates" % slot]:
config.ci[slot].canHandleHighBitrates = ConfigYesNo(default = True)
config.ci[slot].canHandleHighBitrates.slotid = slot
config.ci[slot].canHandleHighBitrates.addNotifier(setCIBitrate)
if SystemInfo["CI%dRelevantPidsRoutingSupport" % slot]:
config.ci[slot].relevantPidsRouting = ConfigYesNo(default = False)
config.ci[slot].relevantPidsRouting.slotid = slot
config.ci[slot].relevantPidsRouting.addNotifier(setRelevantPidsRouting)
if SystemInfo["CommonInterfaceCIDelay"]:
config.cimisc.dvbCiDelay = ConfigSelection(default = "256", choices = [("16"), ("32"), ("64"), ("128"), ("256")])
config.cimisc.dvbCiDelay.addNotifier(setdvbCiDelay)
class MMIDialog(Screen):
def __init__(self, session, slotid, action, handler=eDVBCI_UI.getInstance(), wait_text="", screen_data=None):
Screen.__init__(self, session)
print "MMIDialog with action" + str(action)
self.mmiclosed = False
self.tag = None
self.slotid = slotid
self.timer = eTimer()
self.timer.callback.append(self.keyCancel)
		#otherwise the skin fails
self["title"] = Label("")
self["subtitle"] = Label("")
self["bottom"] = Label("")
self["entries"] = ConfigList([ ])
self["actions"] = NumberActionMap(["SetupActions", "MenuActions"],
{
"ok": self.okbuttonClick,
"cancel": self.keyCancel,
"menu": self.forceExit,
#for PIN
"left": self.keyLeft,
"right": self.keyRight,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.action = action
self.screen_data = screen_data
self.is_pin_list = -1
self.handler = handler
if wait_text == "":
self.wait_text = _("wait for ci...")
else:
self.wait_text = wait_text
if action == 2: #start MMI
handler.startMMI(self.slotid)
self.showWait()
elif action == 3: #mmi already there (called from infobar)
self.showScreen()
def addEntry(self, list, entry):
if entry[0] == "TEXT": #handle every item (text / pin only?)
list.append( (entry[1], ConfigNothing(), entry[2]) )
if entry[0] == "PIN":
pinlength = entry[1]
if entry[3] == 1:
# masked pins:
x = ConfigPIN(0, len = pinlength, censor = "*")
else:
# unmasked pins:
x = ConfigPIN(0, len = pinlength)
x.addEndNotifier(self.pinEntered)
self["subtitle"].setText(entry[2])
list.append( getConfigListEntry("", x) )
self["bottom"].setText(_("please press OK when ready"))
def pinEntered(self, value):
self.okbuttonClick()
def okbuttonClick(self):
self.timer.stop()
if not self.tag:
return
if self.tag == "WAIT":
print "do nothing - wait"
elif self.tag == "MENU":
print "answer MENU"
cur = self["entries"].getCurrent()
if cur:
self.handler.answerMenu(self.slotid, cur[2])
else:
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "LIST":
print "answer LIST"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
cur = self["entries"].getCurrent()
answer = str(cur[1].value)
length = len(answer)
while length < cur[1].getLength():
answer = '0' + answer
length += 1
self.answer = answer
if config.ci[self.slotid].use_static_pin.value:
self.session.openWithCallback(self.save_PIN_CB, MessageBox, _("Would you save the entered PIN %s persistent?") % self.answer, MessageBox.TYPE_YESNO)
else:
self.save_PIN_CB(False)
def save_PIN_CB(self, ret=None):
if ret:
config.ci[self.slotid].static_pin.value = self.answer
config.ci[self.slotid].static_pin.save()
self.handler.answerEnq(self.slotid, self.answer)
self.showWait()
def closeMmi(self):
self.timer.stop()
self.close(self.slotid)
def forceExit(self):
self.timer.stop()
if self.tag == "WAIT":
self.handler.stopMMI(self.slotid)
global forceNotShowCiMessages
forceNotShowCiMessages = True
self.close(self.slotid)
def keyCancel(self):
self.timer.stop()
if not self.tag or self.mmiclosed:
self.closeMmi()
elif self.tag == "WAIT":
self.handler.stopMMI(self.slotid)
self.closeMmi()
elif self.tag in ( "MENU", "LIST" ):
print "cancel list"
self.handler.answerMenu(self.slotid, 0)
self.showWait()
elif self.tag == "ENQ":
print "cancel enq"
self.handler.cancelEnq(self.slotid)
self.showWait()
else:
print "give cancel action to ci"
def keyConfigEntry(self, key):
self.timer.stop()
try:
self["entries"].handleKey(key)
if self.is_pin_list == 4:
self.okbuttonClick()
except:
pass
def keyNumberGlobal(self, number):
self.timer.stop()
if self.is_pin_list > -1:
self.is_pin_list += 1
self.keyConfigEntry(KEY_0 + number)
def keyLeft(self):
self.timer.stop()
if self.is_pin_list > 0:
self.is_pin_list += -1
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.timer.stop()
if self.is_pin_list > -1 and self.is_pin_list < 4:
self.is_pin_list += 1
self.keyConfigEntry(KEY_RIGHT)
def updateList(self, list):
List = self["entries"]
try:
List.instance.moveSelectionTo(0)
except:
pass
List.l.setList(list)
def showWait(self):
self.tag = "WAIT"
self["title"].setText("")
self["subtitle"].setText("")
self["bottom"].setText("")
list = [ ]
list.append( (self.wait_text, ConfigNothing()) )
self.updateList(list)
def showScreen(self):
if self.screen_data is not None:
screen = self.screen_data
self.screen_data = None
else:
screen = self.handler.getMMIScreen(self.slotid)
list = [ ]
self.timer.stop()
if len(screen) > 0 and screen[0][0] == "CLOSE":
timeout = screen[0][1]
self.mmiclosed = True
if timeout > 0:
self.timer.start(timeout*1000, True)
else:
self.keyCancel()
else:
self.mmiclosed = False
self.tag = screen[0][0]
for entry in screen:
if entry[0] == "PIN":
if config.ci[self.slotid].use_static_pin.value and str(config.ci[self.slotid].static_pin.value) != "0":
answer = str(config.ci[self.slotid].static_pin.value)
length = len(answer)
while length < config.ci[self.slotid].static_pin.getLength():
answer = '0' + answer
length += 1
self.handler.answerEnq(self.slotid, answer)
self.showWait()
break
else:
self.is_pin_list = 0
self.addEntry(list, entry)
else:
if entry[0] == "TITLE":
self["title"].setText(entry[1])
elif entry[0] == "SUBTITLE":
self["subtitle"].setText(entry[1])
elif entry[0] == "BOTTOM":
self["bottom"].setText(entry[1])
elif entry[0] == "TEXT":
self.addEntry(list, entry)
self.updateList(list)
def ciStateChanged(self):
do_close = False
if self.action == 0: #reset
do_close = True
if self.action == 1: #init
do_close = True
#module still there ?
if self.handler.getState(self.slotid) != 2:
do_close = True
#mmi session still active ?
if self.handler.getMMIState(self.slotid) != 1:
do_close = True
if do_close:
self.closeMmi()
elif self.action > 1 and self.handler.availableMMI(self.slotid) == 1:
self.showScreen()
#FIXME: check for mmi-session closed
class CiMessageHandler:
def __init__(self):
self.session = None
self.auto_close = False
self.ci = { }
self.dlgs = { }
eDVBCI_UI.getInstance().ciStateChanged.get().append(self.ciStateChanged)
def setSession(self, session):
self.session = session
def ciStateChanged(self, slot):
if slot in self.ci:
self.ci[slot](slot)
else:
handler = eDVBCI_UI.getInstance()
if slot in self.dlgs:
self.dlgs[slot].ciStateChanged()
elif handler.availableMMI(slot) == 1:
if self.session:
show_ui = False
if config.ci[slot].show_ci_messages.value:
show_ui = True
screen_data = handler.getMMIScreen(slot)
if config.ci[slot].use_static_pin.value:
if screen_data is not None and len(screen_data):
ci_tag = screen_data[0][0]
if ci_tag == 'ENQ' and len(screen_data) >= 2 and screen_data[1][0] == 'PIN':
if str(config.ci[slot].static_pin.value) == "0":
show_ui = True
else:
answer = str(config.ci[slot].static_pin.value)
length = len(answer)
while length < config.ci[slot].static_pin.getLength():
answer = '0' + answer
length += 1
handler.answerEnq(slot, answer)
show_ui = False
self.auto_close = True
elif ci_tag == 'CLOSE' and self.auto_close:
show_ui = False
self.auto_close = False
if show_ui and not forceNotShowCiMessages and not Screens.Standby.inStandby:
self.dlgs[slot] = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, 3, screen_data = screen_data)
def dlgClosed(self, slot):
if slot in self.dlgs:
del self.dlgs[slot]
def registerCIMessageHandler(self, slot, func):
self.unregisterCIMessageHandler(slot)
self.ci[slot] = func
def unregisterCIMessageHandler(self, slot):
if slot in self.ci:
del self.ci[slot]
CiHandler = CiMessageHandler()
class CiSelection(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "CiSelectionActions"],
{
"left": self.keyLeft,
"right": self.keyLeft,
"ok": self.okbuttonClick,
"cancel": self.cancel
},-1)
self.dlg = None
self.state = { }
self.list = [ ]
self.slot = 0
for slot in range(SystemInfo["CommonInterface"]):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
self.slot += 1
self.appendEntries(slot, state)
CiHandler.registerCIMessageHandler(slot, self.ciStateChanged)
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["entries"] = menuList
self["entries"].onSelectionChanged.append(self.selectionChanged)
self["text"] = Label(_("Slot %d") % 1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
global forceNotShowCiMessages
forceNotShowCiMessages = False
self.setTitle(_("Common Interface"))
def selectionChanged(self):
if self.slot > 1:
cur = self["entries"].getCurrent()
if cur and len(cur) > 2:
self["text"].setText(cur[0] == "**************************" and " " or cur[0] == _("DVB CI Delay") and _("All slots") or _("Slot %d") % (cur[3] + 1))
def keyConfigEntry(self, key):
try:
self["entries"].handleKey(key)
self["entries"].getCurrent()[1].save()
except:
pass
def keyLeft(self):
self.keyConfigEntry(KEY_LEFT)
def keyRight(self):
self.keyConfigEntry(KEY_RIGHT)
def appendEntries(self, slot, state):
self.state[slot] = state
if self.slot > 1:
self.list.append(("**************************", ConfigNothing(), 3, slot))
self.list.append((_("Reset"), ConfigNothing(), 0, slot))
self.list.append((_("Init"), ConfigNothing(), 1, slot))
if self.state[slot] == 0: #no module
self.list.append((_("no module found"), ConfigNothing(), 2, slot))
elif self.state[slot] == 1: #module in init
self.list.append((_("init module"), ConfigNothing(), 2, slot))
elif self.state[slot] == 2: #module ready
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list.append((appname, ConfigNothing(), 2, slot))
self.list.append(getConfigListEntry(_("Set pin code persistent"), config.ci[slot].use_static_pin, 3, slot))
self.list.append((_("Enter persistent PIN code"), ConfigNothing(), 5, slot))
self.list.append((_("Reset persistent PIN code"), ConfigNothing(), 6, slot))
self.list.append(getConfigListEntry(_("Show CI messages"), config.ci[slot].show_ci_messages, 3, slot))
self.list.append(getConfigListEntry(_("Multiple service support"), config.ci[slot].canDescrambleMultipleServices, 3, slot))
if SystemInfo["CI%dSupportsHighBitrates" % slot]:
self.list.append(getConfigListEntry(_("High bitrate support"), config.ci[slot].canHandleHighBitrates, 3, slot))
if SystemInfo["CI%dRelevantPidsRoutingSupport" % slot]:
self.list.append(getConfigListEntry(_("Relevant PIDs routing"), config.ci[slot].relevantPidsRouting, 3, slot))
if SystemInfo["CommonInterfaceCIDelay"]:
self.list.append(getConfigListEntry(_("DVB CI Delay"), config.cimisc.dvbCiDelay, 3, slot))
def updateState(self, slot):
state = eDVBCI_UI.getInstance().getState(slot)
self.state[slot] = state
slotidx = 0
while len(self.list[slotidx]) < 3 or self.list[slotidx][3] != slot:
slotidx += 1
slotidx += 1 #do not change Reset
slotidx += 1 #do not change Init
if state == 0: #no module
self.list[slotidx] = (_("no module found"), ConfigNothing(), 2, slot)
elif state == 1: #module in init
self.list[slotidx] = (_("init module"), ConfigNothing(), 2, slot)
elif state == 2: #module ready
appname = eDVBCI_UI.getInstance().getAppName(slot)
self.list[slotidx] = (appname, ConfigNothing(), 2, slot)
lst = self["entries"]
lst.list = self.list
lst.l.setList(self.list)
def ciStateChanged(self, slot):
if self.dlg:
self.dlg.ciStateChanged()
else:
state = eDVBCI_UI.getInstance().getState(slot)
if self.state[slot] != state:
self.state[slot] = state
self.updateState(slot)
def dlgClosed(self, slot):
self.dlg = None
def okbuttonClick(self):
cur = self["entries"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 3:
pass
elif action == 0: #reset
eDVBCI_UI.getInstance().setReset(slot)
elif action == 1: #init
eDVBCI_UI.getInstance().setInit(slot)
elif action == 5:
self.session.openWithCallback(self.cancelCB, PermanentPinEntry, config.ci[slot].static_pin, _("Smartcard PIN"))
elif action == 6:
config.ci[slot].static_pin.value = 0
config.ci[slot].static_pin.save()
self.session.openWithCallback(self.cancelCB, MessageBox, _("The saved PIN was cleared."), MessageBox.TYPE_INFO)
elif self.state[slot] == 2:
self.dlg = self.session.openWithCallback(self.dlgClosed, MMIDialog, slot, action)
def cancelCB(self, value):
pass
def cancel(self):
for slot in range(SystemInfo["CommonInterface"]):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
CiHandler.unregisterCIMessageHandler(slot)
self.close()
class PermanentPinEntry(Screen, ConfigListScreen):
def __init__(self, session, pin, pin_slot):
Screen.__init__(self, session)
self.skinName = ["ParentalControlChangePin", "Setup" ]
self.setup_title = _("Enter pin code")
self.onChangedEntry = [ ]
self.slot = pin_slot
self.pin = pin
self.list = []
self.pin1 = ConfigPIN(default = 0, censor = "*")
self.pin2 = ConfigPIN(default = 0, censor = "*")
self.pin1.addEndNotifier(boundFunction(self.valueChanged, 1))
self.pin2.addEndNotifier(boundFunction(self.valueChanged, 2))
self.list.append(getConfigListEntry(_("Enter PIN"), NoSave(self.pin1)))
self.list.append(getConfigListEntry(_("Reenter PIN"), NoSave(self.pin2)))
ConfigListScreen.__init__(self, self.list)
self["actions"] = NumberActionMap(["DirectionActions", "ColorActions", "OkCancelActions"],
{
"cancel": self.cancel,
"red": self.cancel,
"save": self.keyOK,
}, -1)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def valueChanged(self, pin, value):
if pin == 1:
self["config"].setCurrentIndex(1)
elif pin == 2:
self.keyOK()
def keyOK(self):
if self.pin1.value == self.pin2.value:
self.pin.value = self.pin1.value
self.pin.save()
self.session.openWithCallback(self.close, MessageBox, _("The PIN code has been saved successfully."), MessageBox.TYPE_INFO)
else:
self.session.open(MessageBox, _("The PIN codes you entered are different."), MessageBox.TYPE_ERROR)
def cancel(self):
self.close(None)
def keyNumberGlobal(self, number):
ConfigListScreen.keyNumberGlobal(self, number)
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
| gpl-2.0 |
pythonbyexample/PBE | dbe/businesstest/migrations/0008_auto__add_field_message_to_factory.py | 1 | 7041 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Message.to_factory'
db.add_column('businesstest_message', 'to_factory', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'Message.to_factory'
db.delete_column('businesstest_message', 'to_factory')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'businesstest.entry': {
'Meta': {'object_name': 'Entry'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'eset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': "orm['businesstest.Set']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': "orm['businesstest.Task']"}),
'time_taken': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'businesstest.message': {
'Meta': {'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'global_msg': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'message_senders'", 'to': "orm['auth.User']"}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'to_factory': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'businesstest.set': {
'Meta': {'ordering': "['created']", 'object_name': 'Set'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time_taken': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'businesstest.task': {
'Meta': {'ordering': "['number']", 'object_name': 'Task'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.FloatField', [], {}),
'options': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'tasktype': ('django.db.models.fields.CharField', [], {'default': "'single'", 'max_length': '20'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['businesstest']
| bsd-3-clause |
eeshangarg/zulip | zerver/migrations/0143_realm_bot_creation_policy.py | 6 | 1752 | # Generated by Django 1.11.6 on 2018-03-09 18:00
from django.db import migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
BOT_CREATION_EVERYONE = 1
def set_initial_value_for_bot_creation_policy(
apps: StateApps, schema_editor: DatabaseSchemaEditor
) -> None:
Realm = apps.get_model("zerver", "Realm")
Realm.BOT_CREATION_EVERYONE = 1
Realm.BOT_CREATION_LIMIT_GENERIC_BOTS = 2
for realm in Realm.objects.all():
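        # Map the old boolean flag onto the new bot_creation_policy enum value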
if realm.create_generic_bot_by_admins_only:
realm.bot_creation_policy = Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
else:
realm.bot_creation_policy = Realm.BOT_CREATION_EVERYONE
realm.save(update_fields=["bot_creation_policy"])
def reverse_code(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Realm = apps.get_model("zerver", "Realm")
Realm.BOT_CREATION_EVERYONE = 1
for realm in Realm.objects.all():
if realm.bot_creation_policy == Realm.BOT_CREATION_EVERYONE:
realm.create_generic_bot_by_admins_only = False
else:
realm.create_generic_bot_by_admins_only = True
realm.save(update_fields=["create_generic_bot_by_admins_only"])
class Migration(migrations.Migration):
dependencies = [
("zerver", "0142_userprofile_translate_emoticons"),
]
operations = [
migrations.AddField(
model_name="realm",
name="bot_creation_policy",
field=models.PositiveSmallIntegerField(default=BOT_CREATION_EVERYONE),
),
migrations.RunPython(
set_initial_value_for_bot_creation_policy, reverse_code=reverse_code, elidable=True
),
]
| apache-2.0 |
rartino/ENVISIoN | demo/molecular_dynamics.py | 1 | 2653 | ## ENVISIoN
##
## Copyright (c) 2021 Gabriel Anderberg, Didrik Axén, Adam Engman,
## Kristoffer Gubberud Maras, Joakim Stenborg
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## ##############################################################################################
import os, sys, inspect, inviwopy
path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(path_to_current_folder + "/../")
import envisionpy
import envisionpy.hdf5parser
from envisionpy.network import VisualisationManager
#--------VASP-------
#Path to VASP-files and path to where the generated HDF5-file will be located.
VASP_DIR = path_to_current_folder + "/../unit_testing/resources/MD/VASP/Al_300K"
#HDF5_FILE = path_to_current_folder + "/../demo_molecular_dynamics.hdf5"
#--------Premade HDF5-files-------
#Path to a HDF5-file already generated by the molecular_dynamics parser
HDF5_FILE = path_to_current_folder + "/../md_test.hdf5"
#HDF5_FILE = path_to_current_folder + "/../test_md_2punkt0.hdf5"
#parse the VASP-file for molecular dynamics
envisionpy.hdf5parser.mol_dynamic_parser(HDF5_FILE, VASP_DIR)
#clear any old network
inviwopy.app.network.clear()
#Initialize inviwo network
visManager = VisualisationManager(HDF5_FILE, inviwopy.app, True)
visManager.start("molecular_dynamics")
| bsd-2-clause |
sbusso/rethinkdb | external/v8_3.30.33.16/build/gyp/test/rename/gyptest-filecase.py | 320 | 1110 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Checks that files whose file case changes get rebuilt correctly.
"""
import os
import TestGyp
test = TestGyp.TestGyp()
CHDIR = 'filecase'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
os.rename('filecase/file.c', 'filecase/fIlE.c')
test.write('filecase/test.gyp',
test.read('filecase/test.gyp').replace('file.c', 'fIlE.c'))
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Check that having files that differ just in their case still work on
# case-sensitive file systems.
test.write('filecase/FiLe.c', 'int f(); int main() { return f(); }')
test.write('filecase/fIlE.c', 'int f() { return 42; }')
is_case_sensitive = test.read('filecase/FiLe.c') != test.read('filecase/fIlE.c')
if is_case_sensitive:
test.run_gyp('test-casesensitive.gyp', chdir=CHDIR)
test.build('test-casesensitive.gyp', test.ALL, chdir=CHDIR)
test.pass_test()
| agpl-3.0 |
rolandmansilla/microblog | flask/lib/python2.7/site-packages/flask/templating.py | 783 | 4707 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
    knowledge of how Flask's blueprints work so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
for loader, local_name in self._iter_loaders(template):
try:
return loader.get_source(environment, local_name)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield loader, template
# old style module based loaders in case we are dealing with a
# blueprint that is an old style module
try:
module, local_name = posixpath.normpath(template).split('/', 1)
blueprint = self.app.blueprints[module]
if blueprint_is_module(blueprint):
loader = blueprint.jinja_loader
if loader is not None:
yield loader, local_name
except (ValueError, KeyError):
pass
for blueprint in itervalues(self.app.blueprints):
if blueprint_is_module(blueprint):
continue
loader = blueprint.jinja_loader
if loader is not None:
yield loader, template
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for name, blueprint in iteritems(self.app.blueprints):
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
prefix = ''
if blueprint_is_module(blueprint):
prefix = name + '/'
result.add(prefix + template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)
| bsd-3-clause |
saisaizhang/Food | flask/lib/python2.7/site-packages/pip/_vendor/requests/auth.py | 44 | 6702 | # -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
from base64 import b64encode
from .compat import urlparse, str
from .cookies import extract_cookies_to_jar
from .utils import parse_dict_header, to_native_string
from .status_codes import codes
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr
class AuthBase(object):
"""Base class that all auth implementations derive from"""
def __call__(self, r):
raise NotImplementedError('Auth hooks must be callable.')
class HTTPBasicAuth(AuthBase):
"""Attaches HTTP Basic Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
def __call__(self, r):
r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPProxyAuth(HTTPBasicAuth):
"""Attaches HTTP Proxy Authentication to a given Request object."""
def __call__(self, r):
r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
return r
class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        hash_utf8 = None
        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
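        # RFC 2617: A1 hashes user:realm:password and A2 hashes method:digest-URI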
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def handle_redirect(self, r, **kwargs):
"""Reset num_401_calls counter on redirects."""
if r.is_redirect:
setattr(self, 'num_401_calls', 1)
def handle_401(self, r, **kwargs):
"""Takes the given response and tries digest-auth, if needed."""
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
setattr(self, 'num_401_calls', num_401_calls + 1)
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
setattr(self, 'num_401_calls', num_401_calls + 1)
return r
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
# In the case of HTTPDigestAuth being reused and the body of
# the previous request was a file-like object, pos has the
# file position of the previous body. Ensure it's set to
# None.
self.pos = None
r.register_hook('response', self.handle_401)
r.register_hook('response', self.handle_redirect)
return r
| bsd-3-clause |
dangerdak/apuniverse | apuniverse/blog/migrations/0001_initial.py | 1 | 2045 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table('blog_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=40)),
('text', self.gf('ckeditor.fields.RichTextField')()),
('status', self.gf('django.db.models.fields.CharField')(max_length=9, default='published')),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('date_created', self.gf('django.db.models.fields.DateTimeField')()),
('last_modified', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('blog', ['Post'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table('blog_post')
models = {
'blog.post': {
'Meta': {'ordering': "['-pub_date']", 'object_name': 'Post'},
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '40'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '9', 'default': "'published'"}),
'text': ('ckeditor.fields.RichTextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blog'] | mit |
bigbiff/android_kernel_samsung_sm-p605 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait a little while (but not too long); the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
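	# Lines look like "... ] <timestamp>: <callee> <-<caller>"; capture the time, callee and caller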
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
ucbvislab/radiotool | radiotool/composer/timestretchsegment.py | 1 | 1360 | from segment import Segment
from scipy.signal import resample
class TimeStretchSegment(Segment):
"""Like a :py:class:`radiotool.composer.Segment`, but stretches
time to fit a specified duration.
"""
def __init__(self, track, comp_location, start, orig_duration, new_duration):
"""Create a time-stetched segment.
It acts like a :py:class:`radiotool.composer.Segment` but you
can specify the target duration. The segment will then
resample its frames to meet this duration.
:param track: Track to slice
:type track: :py:class:`radiotool.composer.Track`
:param float comp_location: Location in composition to play this segment (in seconds)
:param float start: Start of segment (in seconds)
:param float orig_duration: Original duration of segment (in seconds)
:param float new_duration: Target (stretched) duration of segment (in seconds)
"""
Segment.__init__(self, track, comp_location, start, new_duration)
self.orig_duration = int(orig_duration * self.samplerate)
def get_frames(self, channels=2):
self.track.current_frame = self.start
frames = self.track.read_frames(self.orig_duration, channels=channels)
frames = resample(frames, self.duration)
self.track.current_frame = 0
return frames | isc |
sputnik13/kubernetes | vendor/github.com/ugorji/go/codec/test.py | 181 | 4029 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"some&day>some<day",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('127.0.0.1', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('127.0.0.1', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('127.0.0.1', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
kevclarx/ansible | lib/ansible/modules/source_control/github_release.py | 12 | 3323 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: github_release
short_description: Interact with GitHub Releases
description:
- Fetch metadata about Github Releases
version_added: 2.2
options:
token:
required: true
description:
- Github Personal Access Token for authenticating
user:
required: true
description:
- The GitHub account that owns the repository
repo:
required: true
description:
- Repository name
action:
required: true
description:
- Action to perform
choices: [ 'latest_release' ]
author:
- "Adrian Moisey (@adrianmoisey)"
requirements:
- "github3.py >= 1.0.0a3"
'''
EXAMPLES = '''
- name: Get latest release of test/test
github:
token: tokenabc1234567890
user: testuser
repo: testrepo
action: latest_release
'''
RETURN = '''
latest_release:
description: Version of the latest release
type: string
returned: success
sample: 1.1.0
'''
try:
import github3
HAS_GITHUB_API = True
except ImportError:
HAS_GITHUB_API = False
def main():
module = AnsibleModule(
argument_spec=dict(
repo=dict(required=True),
user=dict(required=True),
token=dict(required=True, no_log=True),
action=dict(required=True, choices=['latest_release']),
),
supports_check_mode=True
)
if not HAS_GITHUB_API:
module.fail_json(msg='Missing required github3 module (check docs or '
'install with: pip install github3.py==1.0.0a4)')
repo = module.params['repo']
user = module.params['user']
login_token = module.params['token']
action = module.params['action']
# login to github
try:
gh = github3.login(token=str(login_token))
# test if we're actually logged in
gh.me()
except github3.AuthenticationFailed:
e = get_exception()
module.fail_json(msg='Failed to connect to Github: %s' % e)
repository = gh.repository(str(user), str(repo))
if not repository:
module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
if action == 'latest_release':
release = repository.latest_release()
if release:
module.exit_json(tag=release.tag_name)
else:
module.exit_json(tag=None)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
mezz64/home-assistant | homeassistant/components/zeroconf/__init__.py | 7 | 12731 | """Support for exposing Home Assistant via Zeroconf."""
import fnmatch
from functools import partial
import ipaddress
import logging
import socket
import voluptuous as vol
from zeroconf import (
DNSPointer,
DNSRecord,
Error as ZeroconfError,
InterfaceChoice,
IPVersion,
NonUniqueNameException,
ServiceBrowser,
ServiceInfo,
ServiceStateChange,
Zeroconf,
)
from homeassistant import util
from homeassistant.const import (
ATTR_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
__version__,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import NoURLAvailableError, get_url
from homeassistant.helpers.singleton import singleton
from homeassistant.loader import async_get_homekit, async_get_zeroconf
from .usage import install_multiple_zeroconf_catcher
_LOGGER = logging.getLogger(__name__)
DOMAIN = "zeroconf"
ATTR_HOST = "host"
ATTR_PORT = "port"
ATTR_HOSTNAME = "hostname"
ATTR_TYPE = "type"
ATTR_PROPERTIES = "properties"
ZEROCONF_TYPE = "_home-assistant._tcp.local."
HOMEKIT_TYPE = "_hap._tcp.local."
CONF_DEFAULT_INTERFACE = "default_interface"
CONF_IPV6 = "ipv6"
DEFAULT_DEFAULT_INTERFACE = False
DEFAULT_IPV6 = True
HOMEKIT_PROPERTIES = "properties"
HOMEKIT_PAIRED_STATUS_FLAG = "sf"
HOMEKIT_MODEL = "md"
# Property key=value has a max length of 255
# so we use 230 to leave space for key=
MAX_PROPERTY_VALUE_LEN = 230
# Dns label max length
MAX_NAME_LEN = 63
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(
CONF_DEFAULT_INTERFACE, default=DEFAULT_DEFAULT_INTERFACE
): cv.boolean,
vol.Optional(CONF_IPV6, default=DEFAULT_IPV6): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
@singleton(DOMAIN)
async def async_get_instance(hass):
"""Zeroconf instance to be shared with other integrations that use it."""
return await _async_get_instance(hass)
async def _async_get_instance(hass, **zcargs):
logging.getLogger("zeroconf").setLevel(logging.NOTSET)
zeroconf = await hass.async_add_executor_job(partial(HaZeroconf, **zcargs))
install_multiple_zeroconf_catcher(zeroconf)
def _stop_zeroconf(_):
"""Stop Zeroconf."""
zeroconf.ha_close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_zeroconf)
return zeroconf
class HaServiceBrowser(ServiceBrowser):
"""ServiceBrowser that only consumes DNSPointer records."""
def update_record(self, zc: "Zeroconf", now: float, record: DNSRecord) -> None:
"""Pre-Filter update_record to DNSPointers for the configured type."""
#
        # Each ServiceBrowser currently runs in its own thread which
# processes every A or AAAA record update per instance.
#
# As the list of zeroconf names we watch for grows, each additional
# ServiceBrowser would process all the A and AAAA updates on the network.
#
        # To avoid overwhelming the system we pre-filter here and only process
# DNSPointers for the configured record name (type)
#
if record.name not in self.types or not isinstance(record, DNSPointer):
return
super().update_record(zc, now, record)
class HaZeroconf(Zeroconf):
"""Zeroconf that cannot be closed."""
def close(self):
"""Fake method to avoid integrations closing it."""
ha_close = Zeroconf.close
async def async_setup(hass, config):
"""Set up Zeroconf and make Home Assistant discoverable."""
zc_config = config.get(DOMAIN, {})
zc_args = {}
if zc_config.get(CONF_DEFAULT_INTERFACE, DEFAULT_DEFAULT_INTERFACE):
zc_args["interfaces"] = InterfaceChoice.Default
if not zc_config.get(CONF_IPV6, DEFAULT_IPV6):
zc_args["ip_version"] = IPVersion.V4Only
zeroconf = hass.data[DOMAIN] = await _async_get_instance(hass, **zc_args)
async def _async_zeroconf_hass_start(_event):
"""Expose Home Assistant on zeroconf when it starts.
        Wait until the start event, as otherwise HTTP is not up and running yet.
"""
uuid = await hass.helpers.instance_id.async_get()
await hass.async_add_executor_job(
_register_hass_zc_service, hass, zeroconf, uuid
)
async def _async_zeroconf_hass_started(_event):
"""Start the service browser."""
await _async_start_zeroconf_browser(hass, zeroconf)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_zeroconf_hass_start)
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STARTED, _async_zeroconf_hass_started
)
return True
def _register_hass_zc_service(hass, zeroconf, uuid):
# Get instance UUID
valid_location_name = _truncate_location_name_to_valid(hass.config.location_name)
params = {
"location_name": valid_location_name,
"uuid": uuid,
"version": __version__,
"external_url": "",
"internal_url": "",
# Old base URL, for backward compatibility
"base_url": "",
# Always needs authentication
"requires_api_password": True,
}
# Get instance URL's
try:
params["external_url"] = get_url(hass, allow_internal=False)
except NoURLAvailableError:
pass
try:
params["internal_url"] = get_url(hass, allow_external=False)
except NoURLAvailableError:
pass
# Set old base URL based on external or internal
params["base_url"] = params["external_url"] or params["internal_url"]
host_ip = util.get_local_ip()
try:
host_ip_pton = socket.inet_pton(socket.AF_INET, host_ip)
except OSError:
host_ip_pton = socket.inet_pton(socket.AF_INET6, host_ip)
_suppress_invalid_properties(params)
info = ServiceInfo(
ZEROCONF_TYPE,
name=f"{valid_location_name}.{ZEROCONF_TYPE}",
server=f"{uuid}.local.",
addresses=[host_ip_pton],
port=hass.http.server_port,
properties=params,
)
_LOGGER.info("Starting Zeroconf broadcast")
try:
zeroconf.register_service(info)
except NonUniqueNameException:
_LOGGER.error(
"Home Assistant instance with identical name present in the local network"
)
async def _async_start_zeroconf_browser(hass, zeroconf):
"""Start the zeroconf browser."""
zeroconf_types = await async_get_zeroconf(hass)
homekit_models = await async_get_homekit(hass)
types = list(zeroconf_types)
if HOMEKIT_TYPE not in zeroconf_types:
types.append(HOMEKIT_TYPE)
def service_update(zeroconf, service_type, name, state_change):
"""Service state changed."""
nonlocal zeroconf_types
nonlocal homekit_models
if state_change != ServiceStateChange.Added:
return
try:
service_info = zeroconf.get_service_info(service_type, name)
except ZeroconfError:
_LOGGER.exception("Failed to get info for device %s", name)
return
if not service_info:
# Prevent the browser thread from collapsing as
# service_info can be None
_LOGGER.debug("Failed to get info for device %s", name)
return
info = info_from_service(service_info)
if not info:
# Prevent the browser thread from collapsing
_LOGGER.debug("Failed to get addresses for device %s", name)
return
_LOGGER.debug("Discovered new device %s %s", name, info)
# If we can handle it as a HomeKit discovery, we do that here.
if service_type == HOMEKIT_TYPE:
discovery_was_forwarded = handle_homekit(hass, homekit_models, info)
# Continue on here as homekit_controller
# still needs to get updates on devices
# so it can see when the 'c#' field is updated.
#
# We only send updates to homekit_controller
# if the device is already paired in order to avoid
# offering a second discovery for the same device
if (
discovery_was_forwarded
and HOMEKIT_PROPERTIES in info
and HOMEKIT_PAIRED_STATUS_FLAG in info[HOMEKIT_PROPERTIES]
):
try:
# 0 means paired and not discoverable by iOS clients)
if int(info[HOMEKIT_PROPERTIES][HOMEKIT_PAIRED_STATUS_FLAG]):
return
except ValueError:
# HomeKit pairing status unknown
# likely bad homekit data
return
for entry in zeroconf_types[service_type]:
if len(entry) > 1:
if "macaddress" in entry:
if "properties" not in info:
continue
if "macaddress" not in info["properties"]:
continue
if not fnmatch.fnmatch(
info["properties"]["macaddress"], entry["macaddress"]
):
continue
if "name" in entry:
if "name" not in info:
continue
if not fnmatch.fnmatch(info["name"], entry["name"]):
continue
hass.add_job(
hass.config_entries.flow.async_init(
entry["domain"], context={"source": DOMAIN}, data=info
)
)
_LOGGER.debug("Starting Zeroconf browser")
HaServiceBrowser(zeroconf, types, handlers=[service_update])
def handle_homekit(hass, homekit_models, info) -> bool:
"""Handle a HomeKit discovery.
    Return True if the discovery was forwarded to a config flow.
"""
model = None
props = info.get(HOMEKIT_PROPERTIES, {})
for key in props:
if key.lower() == HOMEKIT_MODEL:
model = props[key]
break
if model is None:
return False
for test_model in homekit_models:
if (
model != test_model
and not model.startswith(f"{test_model} ")
and not model.startswith(f"{test_model}-")
):
continue
hass.add_job(
hass.config_entries.flow.async_init(
homekit_models[test_model], context={"source": "homekit"}, data=info
)
)
return True
return False
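# Illustrative walk-through of handle_homekit (values are hypothetical, not
# taken from this codebase): with homekit_models = {"LIFX": "lifx"} and a
# discovery whose model property is "LIFX Mini", the "LIFX " prefix matches and
# the discovery is forwarded to the "lifx" config flow; a model of "Hue Bridge"
# matches nothing and the function returns False.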
def info_from_service(service):
"""Return prepared info from mDNS entries."""
properties = {"_raw": {}}
for key, value in service.properties.items():
# See https://ietf.org/rfc/rfc6763.html#section-6.4 and
# https://ietf.org/rfc/rfc6763.html#section-6.5 for expected encodings
# for property keys and values
try:
key = key.decode("ascii")
except UnicodeDecodeError:
_LOGGER.debug(
"Ignoring invalid key provided by [%s]: %s", service.name, key
)
continue
properties["_raw"][key] = value
try:
if isinstance(value, bytes):
properties[key] = value.decode("utf-8")
except UnicodeDecodeError:
pass
if not service.addresses:
return None
address = service.addresses[0]
info = {
ATTR_HOST: str(ipaddress.ip_address(address)),
ATTR_PORT: service.port,
ATTR_HOSTNAME: service.server,
ATTR_TYPE: service.type,
ATTR_NAME: service.name,
ATTR_PROPERTIES: properties,
}
return info
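# Example of the dictionary returned by info_from_service (illustrative values
# only):
#   {
#       "host": "192.168.1.42",
#       "port": 80,
#       "hostname": "examplebridge._hap._tcp.local.",
#       "type": "_hap._tcp.local.",
#       "name": "Example Bridge._hap._tcp.local.",
#       "properties": {"md": "Example Bridge", "_raw": {...}},
#   }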
def _suppress_invalid_properties(properties):
"""Suppress any properties that will cause zeroconf to fail to startup."""
for prop, prop_value in properties.items():
if not isinstance(prop_value, str):
continue
if len(prop_value.encode("utf-8")) > MAX_PROPERTY_VALUE_LEN:
_LOGGER.error(
"The property '%s' was suppressed because it is longer than the maximum length of %d bytes: %s",
prop,
MAX_PROPERTY_VALUE_LEN,
prop_value,
)
properties[prop] = ""
def _truncate_location_name_to_valid(location_name):
"""Truncate or return the location name usable for zeroconf."""
if len(location_name.encode("utf-8")) < MAX_NAME_LEN:
return location_name
_LOGGER.warning(
"The location name was truncated because it is longer than the maximum length of %d bytes: %s",
MAX_NAME_LEN,
location_name,
)
return location_name.encode("utf-8")[:MAX_NAME_LEN].decode("utf-8", "ignore")
| apache-2.0 |
matiasdecarli/ansible-modules-core | cloud/amazon/ec2_snapshot.py | 67 | 10167 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
choices: ['yes', 'no']
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
      - whether to create (present) or delete (absent) the snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
- If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "1.9"
author: "Will Thames (@willthames)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
"""
Gets the most recently created snapshot and optionally filters the result
if the snapshot is too old
:param snapshots: list of snapshots to search
:param max_snapshot_age_secs: filter the result if its older than this
:param now: simulate time -- used for unit testing
:return:
"""
if len(snapshots) == 0:
return None
if not now:
now = datetime.datetime.utcnow()
    # The most recently created snapshot is the one with the latest start time
    youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
    # See if the snapshot is younger than the given max age
snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
snapshot_age = now - snapshot_start
if max_snapshot_age_secs is not None:
if snapshot_age.total_seconds() > max_snapshot_age_secs:
return None
return youngest_snapshot
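# Illustrative behaviour of the helper above (hypothetical values): if the
# newest snapshot of the volume started 10 minutes ago and
# max_snapshot_age_secs=3600, that snapshot is returned and create_snapshot()
# reuses it; if it started 2 hours ago, None is returned and a fresh snapshot
# is taken.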
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot:
:param wait_timeout_secs: fail this step after this many seconds
:param sleep_func:
:return:
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
def create_snapshot(module, ec2, state=None, description=None, wait=None,
wait_timeout=None, volume_id=None, instance_id=None,
snapshot_id=None, device_name=None, snapshot_tags=None,
last_snapshot_min_age=None):
snapshot = None
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
if state == 'absent':
if not snapshot_id:
module.fail_json(msg = 'snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError, e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
if last_snapshot_min_age > 0:
try:
current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
except boto.exception.BotoServerError, e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
# Create a new snapshot if we didn't find an existing one to use
if snapshot is None:
snapshot = ec2.create_snapshot(volume_id, description=description)
changed = True
if wait:
if not _create_with_wait(snapshot, wait_timeout):
module.fail_json(msg='Timed out while creating snapshot.')
if snapshot_tags:
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError, e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=changed,
snapshot_id=snapshot.id,
volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size,
tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id = dict(),
description = dict(),
instance_id = dict(),
snapshot_id = dict(),
device_name = dict(),
wait = dict(type='bool', default=True),
wait_timeout = dict(type='int', default=0),
last_snapshot_min_age = dict(type='int', default=0),
snapshot_tags = dict(type='dict', default=dict()),
state = dict(choices=['absent','present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
return module
def main():
module = create_snapshot_ansible_module()
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
last_snapshot_min_age = module.params.get('last_snapshot_min_age')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
create_snapshot(
module=module,
state=state,
description=description,
wait=wait,
wait_timeout=wait_timeout,
ec2=ec2,
volume_id=volume_id,
instance_id=instance_id,
snapshot_id=snapshot_id,
device_name=device_name,
snapshot_tags=snapshot_tags,
last_snapshot_min_age=last_snapshot_min_age
)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
wraiden/spacewalk | client/tools/rhn-kickstart/rhnkickstart/lilo.py | 7 | 8581 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
"""Module for manipulation of lilo.conf files."""
import string
import os
from UserDict import UserDict
class UserDictCase(UserDict):
"""A dictionary with case insensitive keys"""
def __init__(self, data = {}):
UserDict.__init__(self)
# if we are passed a dictionary transfer it over...
for k in data.keys():
kl = string.lower(k)
self.data[kl] = data[k]
# some methods used to make the class work as a dictionary
def __setitem__(self, key, value):
key = string.lower(key)
self.data[key] = value
def __getitem__(self, key):
key = string.lower(key)
if not self.data.has_key(key):
return None
return self.data[key]
get = __getitem__
def __delitem__(self, key):
key = string.lower(key)
del self.data[key]
def has_key(self, key):
key = string.lower(key)
return self.data.has_key(key)
# return this data as a real hash
def get_hash(self):
return self.data
# return the data for marshalling
def __getstate__(self):
return self.data
    # we need a __setstate__ because the presence of __getstate__ screws up deepcopy
def __setstate__(self, state):
self.__init__(state)
# get a dictionary out of this instance ({}.update doesn't get instances)
def dict(self):
return self.data
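# Minimal illustration of the case-insensitive lookup (hypothetical values):
#   d = UserDictCase({'Label': 'linux'})
#   d['LABEL'] -> 'linux'; d['label'] -> 'linux'; d['missing'] -> None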
def needsEnterpriseKernel():
rc = 0
try:
f = open("/proc/e820info", "r")
except IOError:
return 0
for l in f.readlines():
l = string.split(l)
if l[3] == '(reserved)': continue
regionEnd = (string.atol(l[0], 16) - 1) + string.atol(l[2], 16)
if regionEnd > 0xffffffffL:
rc = 1
return rc
class LiloConfigFile:
"""class representing a lilo.conf lilo configuration file. Used to manipulate
the file directly"""
def __repr__ (self, tab = 0):
s = ""
for n in self.order:
if (tab):
s = s + '\t'
if n[0] == '#':
s = s + n[1:]
else:
s = s + n
if self.items[n]:
s = s + "=" + self.items[n]
s = s + '\n'
for count in range(len(self.diskRemaps)):
s = s + "disk = %s\n" % self.diskRemaps[count][1]
s = s + "\tbios = %s\n" % self.biosRemaps[count][1]
for cl in self.images:
s = s + "\n%s=%s\n" % (cl.imageType, cl.path)
s = s + cl.__repr__(1)
return s
def addEntry(self, item, val = None, replace = 1):
if not self.items.has_key(item):
self.order.append(item)
elif not replace:
return
if (val):
self.items[item] = str(val)
else:
self.items[item] = None
def getEntry(self, item):
if self.items.has_key(item):
return self.items[item]
else:
return None
def delEntry(self, item):
newOrder = []
for i in self.order:
if item != i: newOrder.append(i)
self.order = newOrder
del self.items[item]
def listEntries(self):
foo = self.items
return foo
def testEntry(self, item):
if self.items.has_key(item):
return 1
else:
return 0
def getImage(self, label):
for config in self.images:
if string.lower(config.getEntry('label')) == string.lower(label):
return (config.imageType, config,config.path,config.other)
if config.getEntry('alias'):
if string.lower(config.getEntry('alias')) == string.lower(label):
return (config.imageType, config,config.path,config.other)
raise IndexError, "unknown image %s" % (label)
def addImage (self, config,first=None):
# make sure the config has a valid label
config.getEntry('label')
if not config.path or not config.imageType:
raise ValueError, "subconfig missing path or image type"
if first:
self.images = [config] + self.images
else:
self.images.append(config)
def delImage (self, label):
for config in self.images:
if string.lower(config.getEntry('label')) == string.lower(label):
self.images.remove (config)
return
raise IndexError, "unknown image %s" % (label,)
def listImages (self):
l = []
for config in self.images:
l.append(config.getEntry('label'))
return l
def listAliases (self):
l = []
for config in self.images:
if config.getEntry('alias'):
l.append(config.getEntry('alias'))
return l
def getPath (self):
return self.path
def write(self, file, perms = 0644):
f = open(file, "w")
f.write(self.__repr__())
f.close()
os.chmod(file, perms)
def read (self, file):
f = open(file, "r")
image = None
for l in f.readlines():
l = l[:-1]
orig = l
while (l and (l[0] == ' ' or l[0] == '\t')):
l = l[1:]
if not l:
continue
if l[0] == '#':
self.order.append('#' + orig)
continue
fields = string.split(l, '=', 1)
if (len(fields) == 2):
f0 = string.strip (fields [0])
f1 = string.strip (fields [1])
if (f0 == "image" or f0 == "other"):
if image: self.addImage(image)
image = LiloConfigFile(imageType = f0,
path = f1)
if (f0 == "other"):
image.other = 1
args = None
else:
args = (f0, f1)
if (f0 == "disk"):
self.diskRemaps.append((f0,f1))
args = None
if (f0 == "bios"):
self.biosRemaps.append((f0,f1))
args = None
else:
args = (string.strip (l),)
if (args and image):
apply(image.addEntry, args)
elif args:
apply(self.addEntry, args)
if image: self.addImage(image)
f.close()
def __init__(self, imageType = None, path = None):
self.imageType = imageType
self.path = path
self.order = []
self.images = []
self.other = None
self.items = UserDictCase()
self.biosRemaps = []
self.diskRemaps = []
self.unsupported = []
def getArch ():
arch = os.uname ()[4]
if (len (arch) == 4 and arch[0] == 'i' and
arch[2:4] == "86"):
arch = "i386"
if arch == "sparc64":
arch = "sparc"
return arch
if __name__ == "__main__":
config = LiloConfigFile ()
config.read ('/etc/lilo.conf')
print config
print "image list", config.listImages()
config.delImage ('linux')
print '----------------------------------'
config = LiloConfigFile ()
config.read ('/etc/lilo.conf')
print config
print '----------------------------------'
print '----------------------------------'
print "list images"
print config.listImages()
print config.getImage('linux')
print "----------------------------------"
print "addimage (testlinux)"
blip = """
read-only
blippy-blob=sdfsdf
append=\"sdfasdfasdf\"
root=/dev/hda6
"""
sl = LiloConfigFile(imageType = "image", path="/boot/somevmlinuz-2.4.0")
sl.addEntry("label", "newkernel")
sl.addEntry("initrd", "blipppy")
config.addImage(sl)
print '-------------------------------------'
print "writing out /tmp/lilo.conf"
print config.write("/tmp/lilo.conf")
print config
| gpl-2.0 |
Rocky5/XBMC-Emustation | Mod Files/system/scripts/_modules/script.module.requests/lib/requests/packages/urllib3/util/request.py | 1008 | 2089 | from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
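# Additional illustrative call, not from the original docstring:
#   make_headers(basic_auth="user:pass")
#   -> {'authorization': 'Basic dXNlcjpwYXNz'}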
| gpl-3.0 |
wasade/networkx | networkx/algorithms/flow/mincost.py | 9 | 11222 | # -*- coding: utf-8 -*-
"""
Minimum cost flow algorithms on directed connected graphs.
"""
__author__ = """Loïc Séguin-C. <[email protected]>"""
# Copyright (C) 2010 Loïc Séguin-C. <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['min_cost_flow_cost',
'min_cost_flow',
'cost_of_flow',
'max_flow_min_cost']
import networkx as nx
def min_cost_flow_cost(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Find the cost of a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
positive demand means that the node want to receive flow. A flow on
the digraph G satisfies all demand if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
    demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowCost: integer, float
Cost of a minimum cost flow satisfying all demands.
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
          * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowCost = nx.min_cost_flow_cost(G)
>>> flowCost
24
"""
return nx.network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[0]
def min_cost_flow(G, demand = 'demand', capacity = 'capacity',
weight = 'weight'):
"""Return a minimum cost flow satisfying all demands in digraph G.
G is a digraph with edge costs and capacities and in which nodes
have demand, i.e., they want to send or receive some amount of
flow. A negative demand means that the node wants to send flow, a
    positive demand means that the node wants to receive flow. A flow on
    the digraph G satisfies all demands if the net flow into each node
is equal to the demand of that node.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
demand: string
Nodes of the graph G are expected to have an attribute demand
that indicates how much flow a node wants to send (negative
demand) or receive (positive demand). Note that the sum of the
    demands should be 0, otherwise the problem is not feasible. If
this attribute is not present, a node is considered to have 0
demand. Default value: 'demand'.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnfeasible
This exception is raised in the following situations:
* The sum of the demands is not zero. Then, there is no
flow satisfying all demands.
          * There is no flow satisfying all demands.
NetworkXUnbounded
This exception is raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
satisfying all demands is unbounded below.
See also
--------
cost_of_flow, max_flow_min_cost, min_cost_flow_cost, network_simplex
Examples
--------
A simple example of a min cost flow problem.
>>> import networkx as nx
>>> G = nx.DiGraph()
>>> G.add_node('a', demand = -5)
>>> G.add_node('d', demand = 5)
>>> G.add_edge('a', 'b', weight = 3, capacity = 4)
>>> G.add_edge('a', 'c', weight = 6, capacity = 10)
>>> G.add_edge('b', 'd', weight = 1, capacity = 9)
>>> G.add_edge('c', 'd', weight = 2, capacity = 5)
>>> flowDict = nx.min_cost_flow(G)
"""
return nx.network_simplex(G, demand = demand, capacity = capacity,
weight = weight)[1]
def cost_of_flow(G, flowDict, weight = 'weight'):
"""Compute the cost of the flow given by flowDict on graph G.
Note that this function does not check for the validity of the
flow flowDict. This function will fail if the graph G and the
flow don't have the same edge set.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Returns
-------
cost: Integer, float
The total cost of the flow. This is given by the sum over all
edges of the product of the edge's flow and the edge's weight.
See also
--------
max_flow_min_cost, min_cost_flow, min_cost_flow_cost, network_simplex
"""
return sum((flowDict[u][v] * d.get(weight, 0)
for u, v, d in G.edges(data = True)))
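# Illustrative check, reusing the min_cost_flow example above: for the same
# graph G, cost_of_flow(G, min_cost_flow(G)) equals min_cost_flow_cost(G).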
def max_flow_min_cost(G, s, t, capacity = 'capacity', weight = 'weight'):
"""Return a maximum (s, t)-flow of minimum cost.
G is a digraph with edge costs and capacities. There is a source
node s and a sink node t. This function finds a maximum flow from
s to t whose total cost is minimized.
Parameters
----------
G : NetworkX graph
DiGraph on which a minimum cost flow satisfying all demands is
to be found.
s: node label
Source of the flow.
t: node label
Destination of the flow.
capacity: string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
weight: string
Edges of the graph G are expected to have an attribute weight
that indicates the cost incurred by sending one unit of flow on
that edge. If not present, the weight is considered to be 0.
Default value: 'weight'.
Returns
-------
flowDict: dictionary
Dictionary of dictionaries keyed by nodes such that
        flowDict[u][v] is the flow on edge (u, v).
Raises
------
NetworkXError
This exception is raised if the input graph is not directed or
not connected.
NetworkXUnbounded
This exception is raised if there is an infinite capacity path
from s to t in G. In this case there is no maximum flow. This
exception is also raised if the digraph G has a cycle of
negative cost and infinite capacity. Then, the cost of a flow
is unbounded below.
See also
--------
cost_of_flow, min_cost_flow, min_cost_flow_cost, network_simplex
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_edges_from([(1, 2, {'capacity': 12, 'weight': 4}),
... (1, 3, {'capacity': 20, 'weight': 6}),
... (2, 3, {'capacity': 6, 'weight': -3}),
... (2, 6, {'capacity': 14, 'weight': 1}),
... (3, 4, {'weight': 9}),
... (3, 5, {'capacity': 10, 'weight': 5}),
... (4, 2, {'capacity': 19, 'weight': 13}),
... (4, 5, {'capacity': 4, 'weight': 0}),
... (5, 7, {'capacity': 28, 'weight': 2}),
... (6, 5, {'capacity': 11, 'weight': 1}),
... (6, 7, {'weight': 8}),
... (7, 4, {'capacity': 6, 'weight': 6})])
>>> mincostFlow = nx.max_flow_min_cost(G, 1, 7)
>>> mincost = nx.cost_of_flow(G, mincostFlow)
>>> mincost
373
>>> from networkx.algorithms.flow import maximum_flow
>>> maxFlow = maximum_flow(G, 1, 7)[1]
>>> nx.cost_of_flow(G, maxFlow) >= mincost
True
>>> mincostFlowValue = (sum((mincostFlow[u][7] for u in G.predecessors(7)))
... - sum((mincostFlow[7][v] for v in G.successors(7))))
>>> mincostFlowValue == nx.maximum_flow_value(G, 1, 7)
True
"""
maxFlow = nx.maximum_flow_value(G, s, t, capacity = capacity)
H = nx.DiGraph(G)
H.add_node(s, demand = -maxFlow)
H.add_node(t, demand = maxFlow)
return min_cost_flow(H, capacity = capacity, weight = weight)
| bsd-3-clause |
c1728p9/pyOCD | pyOCD/target/target_lpc800.py | 5 | 1453 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cortex_m import CortexM
from .memory_map import (FlashRegion, RamRegion, MemoryMap)
class LPC800(CortexM):
memoryMap = MemoryMap(
FlashRegion( start=0, length=0x8000, blocksize=0x400, isBootMemory=True),
RamRegion( start=0x10000000, length=0x1000)
)
def __init__(self, transport):
super(LPC800, self).__init__(transport, self.memoryMap)
def resetStopOnReset(self, software_reset = None, map_to_user = True):
CortexM.resetStopOnReset(self, software_reset)
        # Remap to use flash and set SP and PC accordingly
if map_to_user:
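            # 0x40048000 is the LPC8xx SYSCON SYSMEMREMAP register; writing
            # 0x2 selects "user flash" mode so the vector table is fetched
            # from flash (assumption based on the LPC800 user manual).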
self.writeMemory(0x40048000, 0x2, 32)
sp = self.readMemory(0x0)
pc = self.readMemory(0x4)
self.writeCoreRegisterRaw('sp', sp)
self.writeCoreRegisterRaw('pc', pc)
| apache-2.0 |
chase-kernan/lsc-seis-gcm | gcm/io/triggers.py | 1 | 3825 |
from gcm.data import triggers as tr
from gcm import utils
import numpy as np
from os.path import join
_ROOT_ATTR_MAP = dict(time='time',
time_min='tstart', time_max='tend',
freq='frequency',
freq_min='fstart', freq_max='fend',
amplitude='amplitude',
snr='snr',
q='q')
def parse_root_triggers(path):
# Let's not load this library unless we have to (it takes a long time)
from rootpy.io import root_open
with root_open(path) as f:
tree = f.triggers
tree.create_buffer()
buffer = tree._buffer
triggers = np.empty(len(tree), dtype=tr.trigger_dtype)
for i, _ in enumerate(tree):
# need to iterate the tree, hence the _ enumerate result
for attr, mapped in _ROOT_ATTR_MAP.iteritems():
triggers[i][attr] = buffer[mapped].value
return triggers
def parse_xml_triggers(path):
from glue.ligolw import array, param, ligolw, table, lsctables
from glue.ligolw import utils as lw_utils
class ContentHandler(ligolw.LIGOLWContentHandler): pass
for module in [array, param, table, lsctables]:
module.use_in(ContentHandler)
xml_doc = lw_utils.load_filename(path, contenthandler=ContentHandler)
table = table.get_table(xml_doc, lsctables.SnglBurstTable.tableName)
triggers = np.empty(len(table), tr.trigger_dtype)
for i, row in enumerate(table):
triggers[i]['time'] = row.peak_time + row.peak_time_ns*1e-9
triggers[i]['time_min'] = row.start_time + row.start_time_ns*1e-9
triggers[i]['time_max'] = row.stop_time + row.stop_time_ns*1e-9
triggers[i]['freq'] = row.peak_frequency
triggers[i]['freq_min'] = row.flow
triggers[i]['freq_max'] = row.fhigh
triggers[i]['amplitude'] = row.amplitude
triggers[i]['snr'] = row.snr
triggers[i]['q'] = 0.0 # not available in xml
return triggers
class TriggerSource(object):
def sync(self, group):
for channel in group.channels:
self.sync_channel(group, channel)
def sync_channel(self, group, channel):
raise NotImplemented
class Omicron(TriggerSource):
def sync_channel(self, group, channel):
with tr.open_triggers(channel, mode='w') as table:
latest = table[-1].time_min if len(table) > 0 else 0
for file in self._get_files(group, channel):
start_time = self._get_start_time(file)
if start_time < latest: continue
print "Syncing {0}".format(file)
triggers = self._parse_file(file)
if len(triggers) == 0: continue
tr.append_triggers(channel, triggers)
latest = triggers[-1]["time_min"]
def _get_files(self, group, channel):
raise NotImplemented
def _parse_file(self, file):
raise NotImplemented
def _get_start_time(self, file):
# ..._TIME_DURATION.extension
return int(file.split(".")[-2].split("-")[-2])
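# Illustrative filename for the parsing above (hypothetical name): a trigger
# file called "CHANNEL-1126259446-32.xml" yields a start time of 1126259446.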
class _ScottXmlTriggers(Omicron):
def _get_files(self, group, channel):
base = "/home/scott.dossa/omicron/triggers"
ifo, chamber = group.name.split("-")
all_channels = join(base, ifo, chamber)
channel_dir = join(all_channels,
"{0.ifo}:{0.subsystem}_{0.name}".format(channel))
return [join(channel_dir, name)
for name in utils.get_files(channel_dir)]
def _parse_file(self, file):
return parse_xml_triggers(file)
default_source = _ScottXmlTriggers()
| gpl-2.0 |
McKabue/CSS3HTML5AzurePortalDesign | node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | 9537 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
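# Illustrative result of _GetPdbPath (hypothetical target): an 'executable'
# target named 'base' with no explicit overrides resolves to
# '<(PRODUCT_DIR)/base.exe.pdb'.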
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYPs tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
| gpl-3.0 |
newerthcom/savagerebirth | libs/python-2.72/Lib/binhex.py | 216 | 14476 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import sys
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
LINELEN=64
RUNCHAR=chr(0x90) # run-length introducer
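# Illustration of the RLE scheme used below (via binascii.rlecode_hqx):
#   a run of six 'A' bytes, "AAAAAA", is encoded as 'A' + RUNCHAR + chr(6);
#   a literal 0x90 byte is escaped as RUNCHAR + chr(0).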
#
# This code is no longer byte-order dependent
#
# Workarounds for non-mac machines.
try:
from Carbon.File import FSSpec, FInfo
from MacOS import openrf
def getfileinfo(name):
finfo = FSSpec(name).FSpGetFInfo()
dir, file = os.path.split(name)
# XXX Get resource/data sizes
fp = open(name, 'rb')
fp.seek(0, 2)
dlen = fp.tell()
fp = openrf(name, '*rb')
fp.seek(0, 2)
rlen = fp.tell()
return file, finfo, dlen, rlen
def openrsrc(name, *mode):
if not mode:
mode = '*rb'
else:
mode = '*' + mode[0]
return openrf(name, mode)
except ImportError:
#
# Glue code for non-macintosh usage
#
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
# Quick check for textfile
fp = open(name)
data = open(name).read(256)
for c in data:
if not c.isspace() and (c<' ' or ord(c) > 0x7f):
break
else:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return ''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
self.hqxdata = ''
self.linelen = LINELEN-1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen//3)*3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata)-self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last]+'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + ':\n')
def close(self):
if self.data:
self.hqxdata = \
self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = ''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = ''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
if type(ofp) == type(''):
ofname = ofp
ofp = open(ofname, 'w')
ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error, 'Filename too long'
d = chr(nl) + name + '\0'
d2 = finfo.Type + finfo.Creator
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error, 'Writing data at the wrong time'
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
            raise Error, 'Incorrect data size, diff=%r' % (self.dlen,)
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Writing resource data at the wrong time'
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Close at the wrong time'
if self.rlen != 0:
raise Error, \
"Incorrect resource-datasize, diff=%r" % (self.rlen,)
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""(infilename, outfilename) - Create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while 1:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = ''
wtd = totalwtd
#
        # The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd+2)//3)*4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while 1:
try:
decdatacur, self.eof = \
binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error, \
'Premature EOF on binhex file'
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error, 'Premature EOF on binhex file'
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = ''
self.post_buffer = ''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd-len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = ''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + '\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if type(ifp) == type(''):
ifp = open(ifp)
#
# Find initial colon.
#
while 1:
ch = ifp.read(1)
if not ch:
raise Error, "No binhex data found"
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == '\r':
continue
if ch == ':':
break
if ch != '\n':
dummy = ifp.readline()
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error, 'CRC error, computed %x, read %x' \
%(self.crc, filecrc)
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1+4+4+2+4+4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error, 'Read data at wrong time'
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = ''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error, 'close_data at wrong time'
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error, 'Read resource data at wrong time'
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
ofp = open(out, 'wb')
# XXXX Do translation on non-mac systems
while 1:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while 1:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close()
def _test():
fname = sys.argv[1]
binhex(fname, fname+'.hqx')
hexbin(fname+'.hqx', fname+'.viahqx')
#hexbin(fname, fname+'.unpacked')
sys.exit(1)
if __name__ == '__main__':
_test()
| gpl-2.0 |
Dreizan/csci1200OnlineCourse | models/roles.py | 7 | 8356 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages mapping of users to roles and roles to privileges."""
__author__ = 'Pavel Simakov ([email protected])'
import collections
import config
from common import utils
from models import MemcacheManager
from models import RoleDAO
from google.appengine.api import users
GCB_ADMIN_LIST = config.ConfigProperty(
'gcb_admin_user_emails', str, (
'A list of email addresses for super-admin users. '
'WARNING! Super-admin users have the highest level of access to your '
'Google App Engine instance and to all data about all courses and '
'students within that instance. Be very careful when modifying this '
'property. '
'Syntax: Entries may be separated with any combination of '
'tabs, spaces, commas, or newlines. Existing values using "[" and '
'"]" around email addresses continues to be supported. '
'Regular expressions are not supported.'),
'', multiline=True)
KEY_COURSE = 'course'
KEY_ADMIN_USER_EMAILS = 'admin_user_emails'
GCB_WHITELISTED_USERS = config.ConfigProperty(
'gcb_user_whitelist', str, (
'A list of email addresses of users allowed access to courses. '
'If this is blank, site-wide user whitelisting is disabled. '
'Access to courses is also implicitly granted to super admins and '
'course admins, so you need not repeat those names here. '
'Course-specific whitelists trump this list - if a course has a '
'non-blank whitelist, this one is ignored. '
'Syntax: Entries may be separated with any combination of '
'tabs, spaces, commas, or newlines. Existing values using "[" and '
'"]" around email addresses continues to be supported. '
'Regular expressions are not supported.'),
'', multiline=True)
Permission = collections.namedtuple('Permission', ['name', 'description'])
class Roles(object):
"""A class that provides information about user roles."""
# Maps module names to callbacks which generate permissions.
# See register_permissions for the structure of the callbacks.
_REGISTERED_PERMISSIONS = collections.OrderedDict()
memcache_key = 'roles.Roles.users_to_permissions_map'
@classmethod
def is_direct_super_admin(cls):
"""Checks if current user is a super admin, without delegation."""
return users.get_current_user() and users.is_current_user_admin()
@classmethod
def is_super_admin(cls):
"""Checks if current user is a super admin, possibly via delegation."""
if cls.is_direct_super_admin():
return True
return cls._user_email_in(users.get_current_user(),
GCB_ADMIN_LIST.value)
@classmethod
def is_course_admin(cls, app_context):
"""Checks if a user is a course admin, possibly via delegation."""
if cls.is_super_admin():
return True
if KEY_COURSE in app_context.get_environ():
environ = app_context.get_environ()[KEY_COURSE]
if KEY_ADMIN_USER_EMAILS in environ:
allowed = environ[KEY_ADMIN_USER_EMAILS]
user = users.get_current_user()
if allowed and cls._user_email_in(user, allowed):
return True
return False
@classmethod
def is_user_whitelisted(cls, app_context):
user = users.get_current_user()
global_whitelist = GCB_WHITELISTED_USERS.value.strip()
course_whitelist = app_context.whitelist.strip()
# Most-specific whitelist used if present.
if course_whitelist:
return cls._user_email_in(user, course_whitelist)
# Global whitelist if no course whitelist
elif global_whitelist:
return cls._user_email_in(user, global_whitelist)
# Lastly, no whitelist = no restrictions
else:
return True
@classmethod
def _user_email_in(cls, user, text):
return user and user.email() in utils.text_to_list(
text, utils.BACKWARD_COMPATIBLE_SPLITTER)
@classmethod
def update_permissions_map(cls):
"""Puts a dictionary mapping users to permissions in memcache.
A dictionary is constructed, using roles information from the datastore,
mapping user emails to dictionaries that map module names to
sets of permissions.
Returns:
The created dictionary.
"""
permissions_map = {}
for role in RoleDAO.get_all():
for user in role.users:
user_permissions = permissions_map.setdefault(user, {})
for (module_name, permissions) in role.permissions.iteritems():
module_permissions = user_permissions.setdefault(
module_name, set())
module_permissions.update(permissions)
MemcacheManager.set(cls.memcache_key, permissions_map)
return permissions_map
@classmethod
def _load_permissions_map(cls):
"""Loads the permissions map from Memcache or creates it if needed."""
permissions_map = MemcacheManager.get(cls.memcache_key)
if not permissions_map:
permissions_map = cls.update_permissions_map()
return permissions_map
@classmethod
def is_user_allowed(cls, app_context, module, permission):
"""Check whether the current user is assigned a certain permission.
Args:
app_context: sites.ApplicationContext of the relevant course
module: module object that registered the permission.
permission: string specifying the permission.
Returns:
boolean indicating whether the current user is allowed to perform
the action associated with the permission.
"""
if cls.is_course_admin(app_context):
return True
if not module or not permission or not users.get_current_user():
return False
permissions_map = cls._load_permissions_map()
user_permissions = permissions_map.get(
users.get_current_user().email(), {})
return permission in user_permissions.get(module.name, set())
@classmethod
def register_permissions(cls, module, callback_function):
"""Registers a callback function that generates permissions.
        A callback should return an iterable of permissions of the type
Permission(permission_name, permission_description)
Example:
Module 'module-werewolf' registers permissions 'can_howl' and
'can_hunt' by defining a function callback_werewolf returning:
[
Permission('can_howl', 'Can howl to the moon'),
Permission('can_hunt', 'Can hunt for sheep')
]
In order to register these permissions the module calls
register_permissions(module, callback_werewolf) with the module
whose module.name is 'module-werewolf'.
Args:
module: module object that registers the permissions.
callback_function: a function accepting ApplicationContext as sole
argument and returning a list of permissions.
"""
assert module is not None
assert module.name
assert module not in cls._REGISTERED_PERMISSIONS
cls._REGISTERED_PERMISSIONS[module] = callback_function
@classmethod
def unregister_permissions(cls, module):
del cls._REGISTERED_PERMISSIONS[module]
@classmethod
def get_modules(cls):
return cls._REGISTERED_PERMISSIONS.iterkeys()
@classmethod
def get_permissions(cls):
return cls._REGISTERED_PERMISSIONS.iteritems()
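# Illustrative sketch, not part of the original module: how a module could
# register the 'module-werewolf' permissions described in the docstring above
# and later gate a handler with Roles.is_user_allowed(). The custom_module
# object and the helper names are hypothetical; only Roles and Permission are
# assumed from this file.
def _example_werewolf_permissions(unused_app_context):
    return [
        Permission('can_howl', 'Can howl to the moon'),
        Permission('can_hunt', 'Can hunt for sheep')]
def _example_register_werewolf(custom_module):
    # custom_module.name would be 'module-werewolf'.
    Roles.register_permissions(custom_module, _example_werewolf_permissions)
def _example_may_hunt(app_context, custom_module):
    # True for super/course admins, or when one of the user's roles grants
    # 'can_hunt' for this module.
    return Roles.is_user_allowed(app_context, custom_module, 'can_hunt')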
| apache-2.0 |
viacoin/viacoin | contrib/devtools/copyright_header.py | 13 | 22431 | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# libsecp256k1:
'src/secp256k1/include/secp256k1.h',
'src/secp256k1/include/secp256k1_ecdh.h',
'src/secp256k1/include/secp256k1_recovery.h',
'src/secp256k1/include/secp256k1_schnorr.h',
'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c',
'src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h',
'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c',
'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h',
# univalue:
'src/univalue/test/object.cpp',
'src/univalue/lib/univalue_escapes.h',
# auto generated:
'src/qt/bitcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/tinyformat.h',
'src/leveldb/util/env_win.cc',
'src/crypto/ctaes/bench.c',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
return ((EXCLUDE_COMPILED.match(filename) is None) and
(INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files'
def call_git_ls():
out = subprocess.check_output(GIT_LS_CMD.split(' '))
return [f for f in out.decode("utf-8").split('\n') if f != '']
def get_filenames_to_examine():
filenames = call_git_ls()
return sorted([filename for filename in filenames if
applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = 'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
def compile_copyright_regex(copyright_style, year_style, name):
return re.compile('%s %s %s' % (copyright_style, year_style, name))
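# Illustrative examples, not part of the original script: for the holder
# "The Bitcoin Core developers\n" the styles compiled below would match lines
# such as
#   "Copyright (c) 2015-2016 The Bitcoin Core developers"   (dominant style)
#   "Copyright (c) 2014, 2015 The Bitcoin Core developers"  (year list style)
#   "Copyright 2016 The Bitcoin Core developers"            (no-"(c)" style)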
EXPECTED_HOLDER_NAMES = [
"Satoshi Nakamoto\n",
"The Bitcoin Core developers\n",
"The Bitcoin Core developers \n",
"Bitcoin Core Developers\n",
"the Bitcoin Core developers\n",
"The Bitcoin developers\n",
"The LevelDB Authors\. All rights reserved\.\n",
"BitPay Inc\.\n",
"BitPay, Inc\.\n",
"University of Illinois at Urbana-Champaign\.\n",
"MarcoFalke\n",
"Pieter Wuille\n",
"Pieter Wuille +\*\n",
"Pieter Wuille, Gregory Maxwell +\*\n",
"Pieter Wuille, Andrew Poelstra +\*\n",
"Andrew Poelstra +\*\n",
"Wladimir J. van der Laan\n",
"Jeff Garzik\n",
"Diederik Huys, Pieter Wuille +\*\n",
"Thomas Daede, Cory Fields +\*\n",
"Jan-Klaas Kollhof\n",
"Sam Rushing\n",
"ArtForz -- public domain half-a-node\n",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
return open(os.path.abspath(filename), 'r').read()
def gather_file_info(filename):
info = {}
info['filename'] = filename
c = read_file(filename)
info['contents'] = c
info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
info['classified_copyrights'] = 0
info['dominant_style'] = {}
info['year_list_style'] = {}
info['without_c_style'] = {}
for holder_name in EXPECTED_HOLDER_NAMES:
has_dominant_style = (
file_has_dominant_style_copyright_for_holder(c, holder_name))
has_year_list_style = (
file_has_year_list_style_copyright_for_holder(c, holder_name))
has_without_c_style = (
file_has_without_c_style_copyright_for_holder(c, holder_name))
info['dominant_style'][holder_name] = has_dominant_style
info['year_list_style'][holder_name] = has_year_list_style
info['without_c_style'][holder_name] = has_without_c_style
if has_dominant_style or has_year_list_style or has_without_c_style:
info['classified_copyrights'] = info['classified_copyrights'] + 1
return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
if not verbose:
return
for filename in filenames:
print("\t%s" % filename)
def print_report(file_infos, verbose):
print(SEPARATOR)
examined = [i['filename'] for i in file_infos]
print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
len(examined))
print_filenames(examined, verbose)
print(SEPARATOR)
print('')
zero_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 0]
print("%4d with zero copyrights" % len(zero_copyrights))
print_filenames(zero_copyrights, verbose)
one_copyright = [i['filename'] for i in file_infos if
i['all_copyrights'] == 1]
print("%4d with one copyright" % len(one_copyright))
print_filenames(one_copyright, verbose)
two_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 2]
print("%4d with two copyrights" % len(two_copyrights))
print_filenames(two_copyrights, verbose)
three_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 3]
print("%4d with three copyrights" % len(three_copyrights))
print_filenames(three_copyrights, verbose)
four_or_more_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] >= 4]
print("%4d with four or more copyrights" % len(four_or_more_copyrights))
print_filenames(four_or_more_copyrights, verbose)
print('')
print(SEPARATOR)
print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
'"<year>" or "<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
dominant_style = [i['filename'] for i in file_infos if
i['dominant_style'][holder_name]]
if len(dominant_style) > 0:
print("%4d with '%s'" % (len(dominant_style),
holder_name.replace('\n', '\\n')))
print_filenames(dominant_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
'"<year1>, <year2>, ...":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
year_list_style = [i['filename'] for i in file_infos if
i['year_list_style'][holder_name]]
if len(year_list_style) > 0:
print("%4d with '%s'" % (len(year_list_style),
holder_name.replace('\n', '\\n')))
print_filenames(year_list_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
'"<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
without_c_style = [i['filename'] for i in file_infos if
i['without_c_style'][holder_name]]
if len(without_c_style) > 0:
print("%4d with '%s'" % (len(without_c_style),
holder_name.replace('\n', '\\n')))
print_filenames(without_c_style, verbose)
print('')
print(SEPARATOR)
unclassified_copyrights = [i['filename'] for i in file_infos if
i['classified_copyrights'] < i['all_copyrights']]
print("%d with unexpected copyright holder names" %
len(unclassified_copyrights))
print_filenames(unclassified_copyrights, verbose)
print(SEPARATOR)
def exec_report(base_directory, verbose):
original_cwd = os.getcwd()
os.chdir(base_directory)
filenames = get_filenames_to_examine()
file_infos = [gather_file_info(f) for f in filenames]
print_report(file_infos, verbose)
os.chdir(original_cwd)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
if len(argv) == 2:
sys.exit(REPORT_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad <base_directory>: %s" % base_directory)
if len(argv) == 3:
verbose = False
elif argv[3] == 'verbose':
verbose = True
else:
sys.exit("*** unknown argument: %s" % argv[2])
exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
git_log_lines = call_git_log(filename)
if len(git_log_lines) == 0:
return [datetime.date.today().year]
# timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
f = open(os.path.abspath(filename), 'r')
file_lines = f.readlines()
f.close()
return file_lines
def write_file_lines(filename, file_lines):
f = open(os.path.abspath(filename), 'w')
f.write(''.join(file_lines))
f.close()
################################################################################
# update header years execution
################################################################################
COPYRIGHT = 'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
index = 0
for line in file_lines:
if UPDATEABLE_LINE_COMPILED.search(line) is not None:
return index, line
index = index + 1
return None, None
def parse_year_range(year_range):
year_split = year_range.split('-')
start_year = year_split[0]
if len(year_split) == 1:
return start_year, start_year
return start_year, year_split[1]
def year_range_to_str(start_year, end_year):
if start_year == end_year:
return start_year
return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
copyright_splitter = 'Copyright (c) '
copyright_split = line.split(copyright_splitter)
# Preserve characters on line that are ahead of the start of the copyright
# notice - they are part of the comment block and vary from file-to-file.
before_copyright = copyright_split[0]
after_copyright = copyright_split[1]
space_split = after_copyright.split(' ')
year_range = space_split[0]
start_year, end_year = parse_year_range(year_range)
if end_year == last_git_change_year:
return line
return (before_copyright + copyright_splitter +
year_range_to_str(start_year, last_git_change_year) + ' ' +
' '.join(space_split[1:]))
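# Illustrative example, not part of the original script: given
#   line = "// Copyright (c) 2009-2014 The Bitcoin Core developers\n"
# and last_git_change_year = "2016", the function returns
#   "// Copyright (c) 2009-2016 The Bitcoin Core developers\n";
# a single-year header such as "(c) 2014" would become "(c) 2014-2016".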
def update_updatable_copyright(filename):
file_lines = read_file_lines(filename)
index, line = get_updatable_copyright_line(file_lines)
if not line:
print_file_action_message(filename, "No updatable copyright.")
return
last_git_change_year = get_most_recent_git_change_year(filename)
new_line = create_updated_copyright_line(line, last_git_change_year)
if line == new_line:
print_file_action_message(filename, "Copyright up-to-date.")
return
file_lines[index] = new_line
write_file_lines(filename, file_lines)
print_file_action_message(filename,
"Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
original_cwd = os.getcwd()
os.chdir(base_directory)
for filename in get_filenames_to_examine():
update_updatable_copyright(filename)
os.chdir(original_cwd)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a bitcoin source code repository.
"""
def print_file_action_message(filename, action):
print("%-52s %s" % (filename, action))
def update_cmd(argv):
if len(argv) != 3:
sys.exit(UPDATE_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad base_directory: %s" % base_directory)
exec_update_header_year(base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
lines = header.split('\n')[1:-1]
lines[0] = lines[0] % year_range_to_str(start_year, end_year)
return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
PYTHON_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_python_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(PYTHON_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
years = get_git_change_years(filename)
return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
index, _ = get_updatable_copyright_line(file_lines)
    return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
if len(file_lines) < 1:
return False
if len(file_lines[0]) <= 2:
return False
return file_lines[0][:2] == '#!'
def insert_python_header(filename, file_lines, start_year, end_year):
if file_has_hashbang(file_lines):
insert_idx = 1
else:
insert_idx = 0
header_lines = get_python_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(insert_idx, line)
write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(0, line)
write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
file_lines = read_file_lines(filename)
if file_already_has_core_copyright(file_lines):
sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
% (filename))
start_year, end_year = get_git_change_year_range(filename)
if style == 'python':
insert_python_header(filename, file_lines, start_year, end_year)
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the bitcoin repository.
"""
def insert_cmd(argv):
if len(argv) != 3:
sys.exit(INSERT_USAGE)
filename = argv[2]
if not os.path.isfile(filename):
sys.exit("*** bad filename: %s" % filename)
_, extension = os.path.splitext(filename)
if extension not in ['.h', '.cpp', '.cc', '.c', '.py']:
sys.exit("*** cannot insert for file extension %s" % extension)
if extension == '.py':
style = 'python'
else:
style = 'cpp'
exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Bitcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(USAGE)
subcommand = sys.argv[1]
if subcommand not in SUBCOMMANDS:
sys.exit(USAGE)
if subcommand == 'report':
report_cmd(sys.argv)
elif subcommand == 'update':
update_cmd(sys.argv)
elif subcommand == 'insert':
insert_cmd(sys.argv)
| mit |
IT-Department-Projects/OOAD-Project | Flask_App/oakcrest/lib/python2.7/site-packages/requests/packages/urllib3/contrib/pyopenssl.py | 75 | 15139 | """
SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 16.0.0)
* cryptography (minimum 1.3.4, from pyopenssl)
* idna (minimum 2.0, from cryptography)
However, pyopenssl depends on cryptography, which depends on idna, so while we
use all three directly here we end up having relatively few packages required.
You can install them with the following command:
pip install pyopenssl cryptography idna
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
"""
from __future__ import absolute_import
import OpenSSL.SSL
from cryptography import x509
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from cryptography.hazmat.backends.openssl.x509 import _Certificate
from socket import timeout, error as SocketError
from io import BytesIO
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
import logging
import ssl
import six
import sys
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI always works.
HAS_SNI = True
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
_openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
_openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
try:
_openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
except AttributeError:
pass
_stdlib_to_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED:
OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
_openssl_to_stdlib_verify = dict(
(v, k) for k, v in _stdlib_to_openssl_verify.items()
)
# OpenSSL will only write 16K at a time
SSL_WRITE_BLOCKSIZE = 16384
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext
log = logging.getLogger(__name__)
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
_validate_dependencies_met()
util.ssl_.SSLContext = PyOpenSSLContext
util.HAS_SNI = HAS_SNI
util.ssl_.HAS_SNI = HAS_SNI
util.IS_PYOPENSSL = True
util.ssl_.IS_PYOPENSSL = True
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
util.ssl_.SSLContext = orig_util_SSLContext
util.HAS_SNI = orig_util_HAS_SNI
util.ssl_.HAS_SNI = orig_util_HAS_SNI
util.IS_PYOPENSSL = False
util.ssl_.IS_PYOPENSSL = False
def _validate_dependencies_met():
"""
Verifies that PyOpenSSL's package-level dependencies have been met.
Throws `ImportError` if they are not met.
"""
# Method added in `cryptography==1.1`; not available in older versions
from cryptography.x509.extensions import Extensions
if getattr(Extensions, "get_extension_for_class", None) is None:
raise ImportError("'cryptography' module missing required functionality. "
"Try upgrading to v1.3.4 or newer.")
# pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
# attribute is only present on those versions.
from OpenSSL.crypto import X509
x509 = X509()
if getattr(x509, "_x509", None) is None:
raise ImportError("'pyOpenSSL' module missing required functionality. "
"Try upgrading to v0.14 or newer.")
def _dnsname_to_stdlib(name):
"""
Converts a dNSName SubjectAlternativeName field to the form used by the
standard library on the given Python version.
Cryptography produces a dNSName as a unicode string that was idna-decoded
from ASCII bytes. We need to idna-encode that string to get it back, and
then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
"""
def idna_encode(name):
"""
Borrowed wholesale from the Python Cryptography Project. It turns out
that we can't just safely call `idna.encode`: it can explode for
wildcard names. This avoids that problem.
"""
import idna
for prefix in [u'*.', u'.']:
if name.startswith(prefix):
name = name[len(prefix):]
return prefix.encode('ascii') + idna.encode(name)
return idna.encode(name)
name = idna_encode(name)
if sys.version_info >= (3, 0):
name = name.decode('utf-8')
return name
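# Illustrative example, not part of the original module: for a hypothetical SAN
# entry u'*.bücher.example' the '*.' prefix is stripped before idna.encode()
# and re-attached, yielding '*.xn--bcher-kva.example' (bytes on Python 2,
# str on Python 3); calling idna.encode() directly on the wildcard form can
# raise, which is the problem idna_encode() works around.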
def get_subj_alt_name(peer_cert):
"""
Given an PyOpenSSL certificate, provides all the subject alternative names.
"""
# Pass the cert to cryptography, which has much better APIs for this.
# This is technically using private APIs, but should work across all
# relevant versions until PyOpenSSL gets something proper for this.
cert = _Certificate(openssl_backend, peer_cert._x509)
# We want to find the SAN extension. Ask Cryptography to locate it (it's
# faster than looping in Python)
try:
ext = cert.extensions.get_extension_for_class(
x509.SubjectAlternativeName
).value
except x509.ExtensionNotFound:
# No such extension, return the empty list.
return []
except (x509.DuplicateExtension, x509.UnsupportedExtension,
x509.UnsupportedGeneralNameType, UnicodeError) as e:
# A problem has been found with the quality of the certificate. Assume
# no SAN field is present.
log.warning(
"A problem was encountered with the certificate that prevented "
"urllib3 from finding the SubjectAlternativeName field. This can "
"affect certificate validation. The error was %s",
e,
)
return []
# We want to return dNSName and iPAddress fields. We need to cast the IPs
# back to strings because the match_hostname function wants them as
# strings.
# Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
# decoded. This is pretty frustrating, but that's what the standard library
# does with certificates, and so we need to attempt to do the same.
names = [
('DNS', _dnsname_to_stdlib(name))
for name in ext.get_values_for_type(x509.DNSName)
]
names.extend(
('IP Address', str(name))
for name in ext.get_values_for_type(x509.IPAddress)
)
return names
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.
Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
collector of pypy.
'''
def __init__(self, connection, socket, suppress_ragged_eofs=True):
self.connection = connection
self.socket = socket
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
self._closed = False
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, *args, **kwargs):
try:
data = self.connection.recv(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return b''
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return b''
else:
raise
except OpenSSL.SSL.WantReadError:
rd = util.wait_for_read(self.socket, self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv(*args, **kwargs)
else:
return data
def recv_into(self, *args, **kwargs):
try:
return self.connection.recv_into(*args, **kwargs)
except OpenSSL.SSL.SysCallError as e:
if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
return 0
else:
raise SocketError(str(e))
except OpenSSL.SSL.ZeroReturnError as e:
if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
return 0
else:
raise
except OpenSSL.SSL.WantReadError:
rd = util.wait_for_read(self.socket, self.socket.gettimeout())
if not rd:
raise timeout('The read operation timed out')
else:
return self.recv_into(*args, **kwargs)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def _send_until_done(self, data):
while True:
try:
return self.connection.send(data)
except OpenSSL.SSL.WantWriteError:
wr = util.wait_for_write(self.socket, self.socket.gettimeout())
if not wr:
raise timeout()
continue
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
def shutdown(self):
# FIXME rethrow compatible exceptions should we ever use this
self.connection.shutdown()
def close(self):
if self._makefile_refs < 1:
try:
self._closed = True
return self.connection.close()
except OpenSSL.SSL.Error:
return
else:
self._makefile_refs -= 1
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': get_subj_alt_name(x509)
}
def _reuse(self):
self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
if _fileobject: # Platform-specific: Python 2
def makefile(self, mode, bufsize=-1):
self._makefile_refs += 1
return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
makefile = backport_makefile
WrappedSocket.makefile = makefile
class PyOpenSSLContext(object):
"""
I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
for translating the interface of the standard library ``SSLContext`` object
to calls into PyOpenSSL.
"""
def __init__(self, protocol):
self.protocol = _openssl_versions[protocol]
self._ctx = OpenSSL.SSL.Context(self.protocol)
self._options = 0
self.check_hostname = False
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options = value
self._ctx.set_options(value)
@property
def verify_mode(self):
return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
@verify_mode.setter
def verify_mode(self, value):
self._ctx.set_verify(
_stdlib_to_openssl_verify[value],
_verify_callback
)
def set_default_verify_paths(self):
self._ctx.set_default_verify_paths()
def set_ciphers(self, ciphers):
if isinstance(ciphers, six.text_type):
ciphers = ciphers.encode('utf-8')
self._ctx.set_cipher_list(ciphers)
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
if cafile is not None:
cafile = cafile.encode('utf-8')
if capath is not None:
capath = capath.encode('utf-8')
self._ctx.load_verify_locations(cafile, capath)
if cadata is not None:
self._ctx.load_verify_locations(BytesIO(cadata))
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._ctx.use_certificate_file(certfile)
if password is not None:
self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
self._ctx.use_privatekey_file(keyfile or certfile)
def wrap_socket(self, sock, server_side=False,
do_handshake_on_connect=True, suppress_ragged_eofs=True,
server_hostname=None):
cnx = OpenSSL.SSL.Connection(self._ctx, sock)
if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
server_hostname = server_hostname.encode('utf-8')
if server_hostname is not None:
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
rd = util.wait_for_read(sock, sock.gettimeout())
if not rd:
raise timeout('select timed out')
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake: %r' % e)
break
return WrappedSocket(cnx, sock)
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
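# Illustrative sketch, not part of the original module. Assuming the standalone
# urllib3 and certifi packages are installed, activating the PyOpenSSL backend
# and making a verified request could look like this (the URL is hypothetical):
#
#     import certifi
#     import urllib3
#     import urllib3.contrib.pyopenssl
#     urllib3.contrib.pyopenssl.inject_into_urllib3()
#     http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
#                                ca_certs=certifi.where())
#     resp = http.request('GET', 'https://example.org/')
#     urllib3.contrib.pyopenssl.extract_from_urllib3()  # undo the patch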
| mit |
chinnuabhi/pjsip | tests/pjsua/scripts-sendto/252_multipart_ok_clutter.py | 40 | 1058 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
body = \
"""
This is the preamble. It is to be ignored, though it
is a handy place for composition agents to include an
explanatory note to non-MIME conformant readers.
--123:45
Content-Type: text/plain
The first part is definitely not SDP
--123:45
This is implicitly typed plain US-ASCII text.
It does NOT end with a linebreak.
--123:45
Content-Type: application/sdp
v=0
o=- 0 0 IN IP4 127.0.0.1
s=pjmedia
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/AVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
--123:45--
This is the epilogue. It is also to be ignored.
"""
args = "--null-audio --auto-answer 200 --max-calls 1"
extra_headers = "Content-Type: multipart/mixed; boundary=\"123:45\""
include = ["v=0", "m=audio"]
exclude = []
sendto_cfg = sip.SendtoCfg( "Valid but cluttered multipart/mixed body containing SDP",
pjsua_args=args, sdp="", resp_code=200,
extra_headers=extra_headers, body=body,
resp_inc=include, resp_exc=exclude)
| gpl-2.0 |