repo_name stringlengths 6-100 | path stringlengths 4-294 | copies stringlengths 1-5 | size stringlengths 4-6 | content stringlengths 606-896k | license stringclasses 15 values
---|---|---|---|---|---
NathanW2/QGIS | tests/src/python/test_authmanager_pki_ows.py | 21 | 7815 | # -*- coding: utf-8 -*-
"""
Tests for auth manager WMS/WFS using QGIS Server through PKI
enabled qgis_wrapped_server.py.
This is an integration test for QGIS Desktop Auth Manager WFS and WMS provider
and QGIS Server WFS/WMS that check if QGIS can use a stored auth manager auth
configuration to access an HTTP Basic protected endpoint.
From build dir, run: ctest -R PyQgsAuthManagerPKIOWSTest -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import sys
import re
import subprocess
import tempfile
import urllib.parse
import stat
__author__ = 'Alessandro Pasotti'
__date__ = '25/10/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from shutil import rmtree
from utilities import unitTestDataPath, waitServer
from qgis.core import (
QgsApplication,
QgsAuthManager,
QgsAuthMethodConfig,
QgsVectorLayer,
QgsRasterLayer,
)
from qgis.PyQt.QtNetwork import QSslCertificate
from qgis.testing import (
start_app,
unittest,
)
try:
QGIS_SERVER_ENDPOINT_PORT = os.environ['QGIS_SERVER_ENDPOINT_PORT']
except KeyError:
QGIS_SERVER_ENDPOINT_PORT = '0' # Auto
QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()
os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH
qgis_app = start_app()
class TestAuthManager(unittest.TestCase):
@classmethod
def setUpAuth(cls):
"""Run before all tests and set up authentication"""
authm = QgsApplication.authManager()
assert (authm.setMasterPassword('masterpassword', True))
cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem')
cls.sslcert = os.path.join(cls.certsdata_path, 'gerardus_cert.pem')
cls.sslkey = os.path.join(cls.certsdata_path, 'gerardus_key.pem')
assert os.path.isfile(cls.sslcert)
assert os.path.isfile(cls.sslkey)
assert os.path.isfile(cls.sslrootcert_path)
os.chmod(cls.sslcert, stat.S_IRUSR)
os.chmod(cls.sslkey, stat.S_IRUSR)
os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
cls.auth_config = QgsAuthMethodConfig("PKI-Paths")
cls.auth_config.setConfig('certpath', cls.sslcert)
cls.auth_config.setConfig('keypath', cls.sslkey)
cls.auth_config.setName('test_pki_auth_config')
cls.username = 'Gerardus'
cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
assert cls.sslrootcert is not None
authm.storeCertAuthorities(cls.sslrootcert)
authm.rebuildCaCertsCache()
authm.rebuildTrustedCaCertsCache()
assert (authm.storeAuthenticationConfig(cls.auth_config)[0])
assert cls.auth_config.isValid()
# cls.server_cert = os.path.join(cls.certsdata_path, 'localhost_ssl_cert.pem')
cls.server_cert = os.path.join(cls.certsdata_path, '127_0_0_1_ssl_cert.pem')
# cls.server_key = os.path.join(cls.certsdata_path, 'localhost_ssl_key.pem')
cls.server_key = os.path.join(cls.certsdata_path, '127_0_0_1_ssl_key.pem')
cls.server_rootcert = cls.sslrootcert_path
os.chmod(cls.server_cert, stat.S_IRUSR)
os.chmod(cls.server_key, stat.S_IRUSR)
os.chmod(cls.server_rootcert, stat.S_IRUSR)
os.environ['QGIS_SERVER_HOST'] = cls.hostname
os.environ['QGIS_SERVER_PORT'] = str(cls.port)
os.environ['QGIS_SERVER_PKI_KEY'] = cls.server_key
os.environ['QGIS_SERVER_PKI_CERTIFICATE'] = cls.server_cert
os.environ['QGIS_SERVER_PKI_USERNAME'] = cls.username
os.environ['QGIS_SERVER_PKI_AUTHORITY'] = cls.server_rootcert
@classmethod
def setUpClass(cls):
"""Run before all tests:
Creates an auth configuration"""
cls.port = QGIS_SERVER_ENDPOINT_PORT
# Clean env just to be sure
env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
for ev in env_vars:
try:
del os.environ[ev]
except KeyError:
pass
cls.testdata_path = unitTestDataPath('qgis_server')
cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys')
cls.project_path = os.path.join(cls.testdata_path, "test_project.qgs")
# cls.hostname = 'localhost'
cls.protocol = 'https'
cls.hostname = '127.0.0.1'
cls.setUpAuth()
server_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'qgis_wrapped_server.py')
cls.server = subprocess.Popen([sys.executable, server_path],
env=os.environ, stdout=subprocess.PIPE)
line = cls.server.stdout.readline()
cls.port = int(re.findall(rb':(\d+)', line)[0])
assert cls.port != 0
# Wait for the server process to start
assert waitServer('%s://%s:%s' % (cls.protocol, cls.hostname, cls.port)), "Server is not responding! %s://%s:%s" % (cls.protocol, cls.hostname, cls.port)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
cls.server.terminate()
rmtree(QGIS_AUTH_DB_DIR_PATH)
del cls.server
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
@classmethod
def _getWFSLayer(cls, type_name, layer_name=None, authcfg=None):
"""
WFS layer factory
"""
if layer_name is None:
layer_name = 'wfs_' + type_name
parms = {
'srsname': 'EPSG:4326',
'typename': type_name,
'url': '%s://%s:%s/?map=%s' % (cls.protocol, cls.hostname, cls.port, cls.project_path),
'version': 'auto',
'table': '',
}
if authcfg is not None:
parms.update({'authcfg': authcfg})
uri = ' '.join([("%s='%s'" % (k, v)) for k, v in list(parms.items())])
wfs_layer = QgsVectorLayer(uri, layer_name, 'WFS')
return wfs_layer
@classmethod
def _getWMSLayer(cls, layers, layer_name=None, authcfg=None):
"""
WMS layer factory
"""
if layer_name is None:
layer_name = 'wms_' + layers.replace(',', '')
parms = {
'crs': 'EPSG:4326',
'url': '%s://%s:%s/?map=%s' % (cls.protocol, cls.hostname, cls.port, cls.project_path),
'format': 'image/png',
# This is needed because of a really weird implementation in QGIS Server, which
# replaces _ in the real layer name with spaces
'layers': urllib.parse.quote(layers.replace('_', ' ')),
'styles': '',
'version': 'auto',
# 'sql': '',
}
if authcfg is not None:
parms.update({'authcfg': authcfg})
uri = '&'.join([("%s=%s" % (k, v.replace('=', '%3D'))) for k, v in list(parms.items())])
wms_layer = QgsRasterLayer(uri, layer_name, 'wms')
return wms_layer
def testValidAuthAccess(self):
"""
Access the protected layer with valid credentials
Note: cannot test invalid access in a separate test because
it would fail the subsequent (valid) calls due to cached connections
"""
wfs_layer = self._getWFSLayer('testlayer_èé', authcfg=self.auth_config.id())
self.assertTrue(wfs_layer.isValid())
wms_layer = self._getWMSLayer('testlayer_èé', authcfg=self.auth_config.id())
self.assertTrue(wms_layer.isValid())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
M3nin0/supreme-broccoli | Web/Flask/site_/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py | 3 | 105017 | # postgresql/base.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: postgresql
:name: PostgreSQL
Sequences/SERIAL
----------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
of creating new primary key values for integer-based primary key columns. When
creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
integer-based primary key columns, which generates a sequence and server side
default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
having the "last insert identifier" available, a RETURNING clause is added to
the INSERT statement which specifies the primary key columns should be
returned after the statement completes. The RETURNING functionality only takes
place if Postgresql 8.2 or later is in use. As a fallback approach, the
sequence, whether specified explicitly or implicitly via ``SERIAL``, is
executed independently beforehand, the returned value to be used in the
subsequent insert. Note that when an
:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the "last inserted identifier" functionality does not
apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
case.
To disable the use of RETURNING by default, specify the flag
``implicit_returning=False`` to :func:`.create_engine`.
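As a minimal sketch (the connection URL here is a placeholder), this looks like::

    from sqlalchemy import create_engine

    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        implicit_returning=False
    )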
.. _postgresql_isolation_level:
Transaction Isolation Level
---------------------------
All Postgresql dialects support setting of transaction isolation level
both via a dialect-specific parameter
:paramref:`.create_engine.isolation_level` accepted by :func:`.create_engine`,
as well as the :paramref:`.Connection.execution_options.isolation_level`
argument as passed to :meth:`.Connection.execution_options`.
When using a non-psycopg2 dialect, this feature works by issuing the command
``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for
each new connection. For the special AUTOCOMMIT isolation level,
DBAPI-specific techniques are used.
To set isolation level using :func:`.create_engine`::
engine = create_engine(
"postgresql+pg8000://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
Valid values for ``isolation_level`` include:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT`` - on psycopg2 / pg8000 only
.. seealso::
:ref:`psycopg2_isolation_level`
:ref:`pg8000_isolation_level`
.. _postgresql_schema_reflection:
Remote-Schema Table Introspection and Postgresql search_path
------------------------------------------------------------
The Postgresql dialect can reflect tables from any schema. The
:paramref:`.Table.schema` argument, or alternatively the
:paramref:`.MetaData.reflect.schema` argument determines which schema will
be searched for the table or tables. The reflected :class:`.Table` objects
will in all cases retain this ``.schema`` attribute as was specified.
However, with regards to tables which these :class:`.Table` objects refer to
via foreign key constraint, a decision must be made as to how the ``.schema``
is represented in those remote tables, in the case where that remote
schema name is also a member of the current
`Postgresql search path
<http://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the Postgresql dialect mimics the behavior encouraged by
Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
returns a sample definition for a particular foreign key constraint,
omitting the referenced schema name from that definition when the name is
also in the Postgresql schema search path. The interaction below
illustrates this behavior::
test=> CREATE TABLE test_schema.referred(id INTEGER PRIMARY KEY);
CREATE TABLE
test=> CREATE TABLE referring(
test(> id INTEGER PRIMARY KEY,
test(> referred_id INTEGER REFERENCES test_schema.referred(id));
CREATE TABLE
test=> SET search_path TO public, test_schema;
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f'
test-> ;
pg_get_constraintdef
---------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES referred(id)
(1 row)
Above, we created a table ``referred`` as a member of the remote schema
``test_schema``, however when we added ``test_schema`` to the
PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the
``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of
the function.
On the other hand, if we set the search path back to the typical default
of ``public``::
test=> SET search_path TO public;
SET
The same query against ``pg_get_constraintdef()`` now returns the fully
schema-qualified name for us::
test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM
test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n
test-> ON n.oid = c.relnamespace
test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid
test-> WHERE c.relname='referring' AND r.contype = 'f';
pg_get_constraintdef
---------------------------------------------------------------
FOREIGN KEY (referred_id) REFERENCES test_schema.referred(id)
(1 row)
SQLAlchemy will by default use the return value of ``pg_get_constraintdef()``
in order to determine the remote schema name. That is, if our ``search_path``
were set to include ``test_schema``, and we invoked a table
reflection process as follows::
>>> from sqlalchemy import Table, MetaData, create_engine
>>> engine = create_engine("postgresql://scott:tiger@localhost/test")
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta,
... autoload=True, autoload_with=conn)
...
<sqlalchemy.engine.result.ResultProxy object at 0x101612ed0>
The above process would deliver to the :attr:`.MetaData.tables` collection
a ``referred`` table named **without** the schema::
>>> meta.tables['referred'].schema is None
True
To alter the behavior of reflection such that the referred schema is
maintained regardless of the ``search_path`` setting, use the
``postgresql_ignore_search_path`` option, which can be specified as a
dialect-specific argument to both :class:`.Table` as well as
:meth:`.MetaData.reflect`::
>>> with engine.connect() as conn:
... conn.execute("SET search_path TO test_schema, public")
... meta = MetaData()
... referring = Table('referring', meta, autoload=True,
... autoload_with=conn,
... postgresql_ignore_search_path=True)
...
<sqlalchemy.engine.result.ResultProxy object at 0x1016126d0>
We will now have ``test_schema.referred`` stored as schema-qualified::
>>> meta.tables['test_schema.referred'].schema
'test_schema'
.. sidebar:: Best Practices for Postgresql Schema reflection
The description of Postgresql schema reflection behavior is complex, and
is the product of many years of dealing with widely varied use cases and
user preferences. But in fact, there's no need to understand any of it if
you just stick to the simplest use pattern: leave the ``search_path`` set
to its default of ``public`` only, never refer to the name ``public`` as
an explicit schema name otherwise, and refer to all other schema names
explicitly when building up a :class:`.Table` object. The options
described here are only for those users who can't, or prefer not to, stay
within these guidelines.
Note that **in all cases**, the "default" schema is always reflected as
``None``. The "default" schema on Postgresql is that which is returned by the
Postgresql ``current_schema()`` function. On a typical Postgresql
installation, this is the name ``public``. So a table that refers to another
which is in the ``public`` (i.e. default) schema will always have the
``.schema`` attribute set to ``None``.
.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path``
dialect-level option accepted by :class:`.Table` and
:meth:`.MetaData.reflect`.
.. seealso::
`The Schema Search Path
<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
- on the Postgresql website.
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
for single-row INSERT statements in order to fetch newly generated
primary key identifiers. To specify an explicit ``RETURNING`` clause,
use the :meth:`._UpdateBase.returning` method on a per-statement basis::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
result = table.update().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo').values(name='bar')
print result.fetchall()
# DELETE..RETURNING
result = table.delete().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo')
print result.fetchall()
.. _postgresql_insert_on_conflict:
INSERT...ON CONFLICT (Upsert)
------------------------------
Starting with version 9.5, PostgreSQL allows "upserts" (update or insert)
of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT`` statement.
A candidate row will only be inserted if that row does not violate
any unique constraints. In the case of a unique constraint violation,
a secondary action can occur which can be either "DO UPDATE", indicating
that the data in the target row should be updated, or "DO NOTHING",
which indicates to silently skip this row.
Conflicts are determined using existing unique constraints and indexes. These
constraints may be identified either using their name as stated in DDL,
or they may be *inferred* by stating the columns and conditions that comprise
the indexes.
SQLAlchemy provides ``ON CONFLICT`` support via the Postgresql-specific
:func:`.postgresql.dml.insert()` function, which provides
the generative methods :meth:`~.postgresql.dml.Insert.on_conflict_do_update`
and :meth:`~.postgresql.dml.Insert.on_conflict_do_nothing`::
from sqlalchemy.dialects.postgresql import insert
insert_stmt = insert(my_table).values(
id='some_existing_id',
data='inserted value')
do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
index_elements=['id']
)
conn.execute(do_nothing_stmt)
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='pk_my_table',
set_=dict(data='updated value')
)
conn.execute(do_update_stmt)
Both methods supply the "target" of the conflict using either the
named constraint or by column inference:
* The :paramref:`.Insert.on_conflict_do_update.index_elements` argument
specifies a sequence containing string column names, :class:`.Column` objects,
and/or SQL expression elements, which would identify a unique index::
do_update_stmt = insert_stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value')
)
do_update_stmt = insert_stmt.on_conflict_do_update(
index_elements=[my_table.c.id],
set_=dict(data='updated value')
)
* When using :paramref:`.Insert.on_conflict_do_update.index_elements` to
infer an index, a partial index can be inferred by also specifying the
:paramref:`.Insert.on_conflict_do_update.index_where` parameter::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(user_email='[email protected]', data='inserted data')
stmt = stmt.on_conflict_do_update(
index_elements=[my_table.c.user_email],
index_where=my_table.c.user_email.like('%@gmail.com'),
set_=dict(data=stmt.excluded.data)
)
conn.execute(stmt)
* The :paramref:`.Insert.on_conflict_do_update.constraint` argument is
used to specify an index directly rather than inferring it. This can be
the name of a UNIQUE constraint, a PRIMARY KEY constraint, or an INDEX::
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='my_table_idx_1',
set_=dict(data='updated value')
)
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint='my_table_pk',
set_=dict(data='updated value')
)
* The :paramref:`.Insert.on_conflict_do_update.constraint` argument may
also refer to a SQLAlchemy construct representing a constraint,
e.g. :class:`.UniqueConstraint`, :class:`.PrimaryKeyConstraint`,
:class:`.Index`, or :class:`.ExcludeConstraint`. In this use,
if the constraint has a name, it is used directly. Otherwise, if the
constraint is unnamed, then inference will be used, where the expressions
and optional WHERE clause of the constraint will be spelled out in the
construct. This use is especially convenient
to refer to the named or unnamed primary key of a :class:`.Table` using the
:attr:`.Table.primary_key` attribute::
do_update_stmt = insert_stmt.on_conflict_do_update(
constraint=my_table.primary_key,
set_=dict(data='updated value')
)
``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are specified using the
:paramref:`.Insert.on_conflict_do_update.set_` parameter. This
parameter accepts a dictionary which consists of direct values
for UPDATE::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
do_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value')
)
conn.execute(do_update_stmt)
.. warning::
The :meth:`.Insert.on_conflict_do_update` method does **not** take into
account Python-side default UPDATE values or generation functions, e.g.
those specified using :paramref:`.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of UPDATE,
unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
In order to refer to the proposed insertion row, the special alias
:attr:`~.postgresql.dml.Insert.excluded` is available as an attribute on
the :class:`.postgresql.dml.Insert` object; this object is a
:class:`.ColumnCollection` which contains all columns of the target
table::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
do_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value', author=stmt.excluded.author)
)
conn.execute(do_update_stmt)
The :meth:`.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(
id='some_id',
data='inserted value',
author='jlh')
on_update_stmt = stmt.on_conflict_do_update(
index_elements=['id'],
set_=dict(data='updated value', author=stmt.excluded.author),
where=(my_table.c.status == 2)
)
conn.execute(on_update_stmt)
``ON CONFLICT`` may also be used to skip inserting a row entirely
if any conflict with a unique or exclusion constraint occurs; below
this is illustrated using the
:meth:`~.postgresql.dml.Insert.on_conflict_do_nothing` method::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
conn.execute(stmt)
If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique or exclusion
constraint violation which occurs::
from sqlalchemy.dialects.postgresql import insert
stmt = insert(my_table).values(id='some_id', data='inserted value')
stmt = stmt.on_conflict_do_nothing()
conn.execute(stmt)
.. versionadded:: 1.1 Added support for Postgresql ON CONFLICT clauses
.. seealso::
`INSERT .. ON CONFLICT <http://www.postgresql.org/docs/current/static/sql-insert.html#SQL-ON-CONFLICT>`_ - in the Postgresql documentation.
.. _postgresql_match:
Full Text Search
----------------
SQLAlchemy makes available the Postgresql ``@@`` operator via the
:meth:`.ColumnElement.match` method on any textual column expression.
On a Postgresql dialect, an expression like the following::
select([sometable.c.text.match("search string")])
will emit to the database::
SELECT text @@ to_tsquery('search string') FROM table
The Postgresql text search functions such as ``to_tsquery()``
and ``to_tsvector()`` are available
explicitly using the standard :data:`.func` construct. For example::
select([
func.to_tsvector('fat cats ate rats').match('cat & rat')
])
Emits the equivalent of::
SELECT to_tsvector('fat cats ate rats') @@ to_tsquery('cat & rat')
The :class:`.postgresql.TSVECTOR` type can provide for explicit CAST::
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy import select, cast
select([cast("some text", TSVECTOR)])
produces a statement equivalent to::
SELECT CAST('some text' AS TSVECTOR) AS anon_1
Full Text Searches in Postgresql are influenced by a combination of: the
PostgreSQL setting of ``default_text_search_config``, the ``regconfig`` used
to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in
during a query.
When performing a Full Text Search against a column that has a GIN or
GiST index that is already pre-computed (which is common on full text
searches) one may need to explicitly pass in a particular PostgreSQL
``regconfig`` value to ensure the query-planner utilizes the index and does
not re-compute the column on demand.
In order to provide for this explicit query planning, or to use different
search strategies, the ``match`` method accepts a ``postgresql_regconfig``
keyword argument::
select([mytable.c.id]).where(
mytable.c.title.match('somestring', postgresql_regconfig='english')
)
Emits the equivalent of::
SELECT mytable.id FROM mytable
WHERE mytable.title @@ to_tsquery('english', 'somestring')
One can also specifically pass in a `'regconfig'` value to the
``to_tsvector()`` command as the initial argument::
select([mytable.c.id]).where(
func.to_tsvector('english', mytable.c.title )\
.match('somestring', postgresql_regconfig='english')
)
produces a statement equivalent to::
SELECT mytable.id FROM mytable
WHERE to_tsvector('english', mytable.title) @@
to_tsquery('english', 'somestring')
It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
PostgreSQL to ensure that you are generating queries with SQLAlchemy that
take full advantage of any indexes you may have created for full text search.
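One possible way to do this (a sketch; the table, connection URL, and search term
are hypothetical) is to compile the statement with literal parameters and prefix
it with ``EXPLAIN ANALYZE``::

    from sqlalchemy import (Table, Column, Integer, String, MetaData,
                            create_engine, select, text)

    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    metadata = MetaData()
    mytable = Table('mytable', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('title', String))

    query = select([mytable.c.id]).where(
        mytable.c.title.match('somestring', postgresql_regconfig='english'))

    # render the query with literal bind values so it can be passed to EXPLAIN
    compiled = query.compile(dialect=engine.dialect,
                             compile_kwargs={"literal_binds": True})
    with engine.connect() as conn:
        for row in conn.execute(text("EXPLAIN ANALYZE " + str(compiled))):
            print(row[0])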
FROM ONLY ...
------------------------
The dialect supports PostgreSQL's ONLY keyword for targeting only a particular
table in an inheritance hierarchy. This can be used to produce the
``SELECT ... FROM ONLY``, ``UPDATE ONLY ...``, and ``DELETE FROM ONLY ...``
syntaxes. It uses SQLAlchemy's hints mechanism::
# SELECT ... FROM ONLY ...
result = table.select().with_hint(table, 'ONLY', 'postgresql')
print result.fetchall()
# UPDATE ONLY ...
table.update(values=dict(foo='bar')).with_hint('ONLY',
dialect_name='postgresql')
# DELETE FROM ONLY ...
table.delete().with_hint('ONLY', dialect_name='postgresql')
.. _postgresql_indexes:
Postgresql-Specific Index Options
---------------------------------
Several extensions to the :class:`.Index` construct are available, specific
to the PostgreSQL dialect.
.. _postgresql_partial_indexes:
Partial Indexes
^^^^^^^^^^^^^^^^
Partial indexes add criterion to the index definition so that the index is
applied to a subset of rows. These can be specified on :class:`.Index`
using the ``postgresql_where`` keyword argument::
Index('my_index', my_table.c.id, postgresql_where=my_table.c.value > 10)
Operator Classes
^^^^^^^^^^^^^^^^^
PostgreSQL allows the specification of an *operator class* for each column of
an index (see
http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
The :class:`.Index` construct allows these to be specified via the
``postgresql_ops`` keyword argument::
Index('my_index', my_table.c.id, my_table.c.data,
postgresql_ops={
'data': 'text_pattern_ops',
'id': 'int4_ops'
})
.. versionadded:: 0.7.2
``postgresql_ops`` keyword argument to :class:`.Index` construct.
Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
the :class:`.Column`, i.e. the name used to access it from the ``.c``
collection of :class:`.Table`, which can be configured to be different than
the actual name of the column as expressed in the database.
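As an illustrative sketch (table and column names are hypothetical), a
:class:`.Column` whose ``key`` differs from its database name is addressed in
``postgresql_ops`` by that key::

    from sqlalchemy import Table, Column, Integer, String, MetaData, Index

    metadata = MetaData()
    # the column is named "data_col" in the database, but keyed as "data" in Python
    tbl = Table('my_table', metadata,
                Column('id', Integer, primary_key=True),
                Column('data_col', String, key='data'))

    Index('my_index', tbl.c.data, tbl.c.id,
          postgresql_ops={'data': 'text_pattern_ops', 'id': 'int4_ops'})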
Index Types
^^^^^^^^^^^^
PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well
as the ability for users to create their own (see
http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be
specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
Index('my_index', my_table.c.data, postgresql_using='gin')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX command, so it *must* be a valid index type for your
version of PostgreSQL.
.. _postgresql_index_storage:
Index Storage Parameters
^^^^^^^^^^^^^^^^^^^^^^^^
PostgreSQL allows storage parameters to be set on indexes. The storage
parameters available depend on the index method used by the index. Storage
parameters can be specified on :class:`.Index` using the ``postgresql_with``
keyword argument::
Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50})
.. versionadded:: 1.0.6
PostgreSQL allows specifying the tablespace in which to create the index.
The tablespace can be specified on :class:`.Index` using the
``postgresql_tablespace`` keyword argument::
Index('my_index', my_table.c.data, postgresql_tablespace='my_tablespace')
.. versionadded:: 1.1
Note that the same option is available on :class:`.Table` as well.
.. _postgresql_index_concurrently:
Indexes with CONCURRENTLY
^^^^^^^^^^^^^^^^^^^^^^^^^
The Postgresql index option CONCURRENTLY is supported by passing the
flag ``postgresql_concurrently`` to the :class:`.Index` construct::
tbl = Table('testtbl', m, Column('data', Integer))
idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
The above index construct will render DDL for CREATE INDEX, assuming
Postgresql 8.2 or higher is detected or for a connection-less dialect, as::
CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
For DROP INDEX, assuming Postgresql 9.2 or higher is detected or for
a connection-less dialect, it will emit::
DROP INDEX CONCURRENTLY test_idx1
.. versionadded:: 1.1 support for CONCURRENTLY on DROP INDEX. The
CONCURRENTLY keyword is now only emitted if a high enough version
of Postgresql is detected on the connection (or for a connection-less
dialect).
.. _postgresql_index_reflection:
Postgresql Index Reflection
---------------------------
The Postgresql database creates a UNIQUE INDEX implicitly whenever the
UNIQUE CONSTRAINT construct is used. When inspecting a table using
:class:`.Inspector`, the :meth:`.Inspector.get_indexes`
and the :meth:`.Inspector.get_unique_constraints` will report on these
two constructs distinctly; in the case of the index, the key
``duplicates_constraint`` will be present in the index entry if it is
detected as mirroring a constraint. When performing reflection using
``Table(..., autoload=True)``, the UNIQUE INDEX is **not** returned
in :attr:`.Table.indexes` when it is detected as mirroring a
:class:`.UniqueConstraint` in the :attr:`.Table.constraints` collection.
.. versionchanged:: 1.0.0 - :class:`.Table` reflection now includes
:class:`.UniqueConstraint` objects present in the :attr:`.Table.constraints`
collection; the Postgresql backend will no longer include a "mirrored"
:class:`.Index` construct in :attr:`.Table.indexes` if it is detected
as corresponding to a unique constraint.
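As an illustration (a sketch; ``some_table`` and the connection URL are
placeholders), the two constructs can be compared via the :class:`.Inspector`::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("postgresql+psycopg2://localhost/test")
    insp = inspect(engine)

    # indexes that mirror a UNIQUE CONSTRAINT carry a 'duplicates_constraint' key
    for idx in insp.get_indexes('some_table'):
        print(idx['name'], idx.get('duplicates_constraint'))

    print(insp.get_unique_constraints('some_table'))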
Special Reflection Options
--------------------------
The :class:`.Inspector` used for the Postgresql backend is an instance
of :class:`.PGInspector`, which offers additional methods::
from sqlalchemy import create_engine, inspect
engine = create_engine("postgresql+psycopg2://localhost/test")
insp = inspect(engine) # will be a PGInspector
print(insp.get_enums())
.. autoclass:: PGInspector
:members:
.. _postgresql_table_options:
PostgreSQL Table Options
-------------------------
Several options for CREATE TABLE are supported directly by the PostgreSQL
dialect in conjunction with the :class:`.Table` construct:
* ``TABLESPACE``::
Table("some_table", metadata, ..., postgresql_tablespace='some_tablespace')
The above option is also available on the :class:`.Index` construct.
* ``ON COMMIT``::
Table("some_table", metadata, ..., postgresql_on_commit='PRESERVE ROWS')
* ``WITH OIDS``::
Table("some_table", metadata, ..., postgresql_with_oids=True)
* ``WITHOUT OIDS``::
Table("some_table", metadata, ..., postgresql_with_oids=False)
* ``INHERITS``::
Table("some_table", metadata, ..., postgresql_inherits="some_supertable")
Table("some_table", metadata, ..., postgresql_inherits=("t1", "t2", ...))
.. versionadded:: 1.0.0
.. seealso::
`Postgresql CREATE TABLE options
<http://www.postgresql.org/docs/current/static/sql-createtable.html>`_
ARRAY Types
-----------
The Postgresql dialect supports arrays, both as multidimensional column types
as well as array literals:
* :class:`.postgresql.ARRAY` - ARRAY datatype
* :class:`.postgresql.array` - array literal
* :func:`.postgresql.array_agg` - ARRAY_AGG SQL function
* :class:`.postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate
function syntax.
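A brief sketch (table and column names are hypothetical) combining these
constructs::

    from sqlalchemy import Table, Column, Integer, MetaData, select
    from sqlalchemy.dialects.postgresql import (ARRAY, array, array_agg,
                                                aggregate_order_by)

    metadata = MetaData()
    tbl = Table('example', metadata,
                Column('id', Integer, primary_key=True),
                Column('scores', ARRAY(Integer)))

    # compare the ARRAY column against an array literal
    stmt = select([tbl.c.id]).where(tbl.c.scores == array([1, 2, 3]))

    # ARRAY_AGG using PG's ORDER BY aggregate syntax
    agg = select([array_agg(aggregate_order_by(tbl.c.id, tbl.c.id.desc()))])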
JSON Types
----------
The Postgresql dialect supports both JSON and JSONB datatypes, including
psycopg2's native support and support for all of Postgresql's special
operators:
* :class:`.postgresql.JSON`
* :class:`.postgresql.JSONB`
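A minimal sketch (a hypothetical table) using a JSONB column with a keyed
lookup::

    from sqlalchemy import Table, Column, Integer, MetaData, select
    from sqlalchemy.dialects.postgresql import JSONB

    metadata = MetaData()
    tbl = Table('docs', metadata,
                Column('id', Integer, primary_key=True),
                Column('payload', JSONB))

    # compare the text value stored under payload['user']['name']
    stmt = select([tbl.c.id]).where(
        tbl.c.payload['user']['name'].astext == 'alice')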
HSTORE Type
-----------
The Postgresql HSTORE type as well as hstore literals are supported:
* :class:`.postgresql.HSTORE` - HSTORE datatype
* :class:`.postgresql.hstore` - hstore literal
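A short sketch (a hypothetical table) of an HSTORE column with a key lookup::

    from sqlalchemy import Table, Column, Integer, MetaData, select
    from sqlalchemy.dialects.postgresql import HSTORE

    metadata = MetaData()
    tbl = Table('items', metadata,
                Column('id', Integer, primary_key=True),
                Column('attrs', HSTORE))

    # keyed access renders as attrs -> 'color'
    stmt = select([tbl.c.id]).where(tbl.c.attrs['color'] == 'red')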
ENUM Types
----------
Postgresql has an independently creatable TYPE structure which is used
to implement an enumerated type. This approach introduces significant
complexity on the SQLAlchemy side in terms of when this type should be
CREATED and DROPPED. The type object is also an independently reflectable
entity. The following sections should be consulted:
* :class:`.postgresql.ENUM` - DDL and typing support for ENUM.
* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types
* :meth:`.postgresql.ENUM.create` , :meth:`.postgresql.ENUM.drop` - individual
CREATE and DROP commands for ENUM.
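As a quick sketch (the type name and connection URL are hypothetical), a named
ENUM type can also be created and dropped independently of any table::

    from sqlalchemy import create_engine
    from sqlalchemy.dialects.postgresql import ENUM

    engine = create_engine("postgresql+psycopg2://localhost/test")
    mood = ENUM('happy', 'sad', name='mood_enum')

    mood.create(engine, checkfirst=True)   # emits CREATE TYPE if not already present
    mood.drop(engine, checkfirst=True)     # emits DROP TYPE if present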
.. _postgresql_array_of_enum:
Using ENUM with ARRAY
^^^^^^^^^^^^^^^^^^^^^
The combination of ENUM and ARRAY is not directly supported by backend
DBAPIs at this time. In order to send and receive an ARRAY of ENUM,
use the following workaround type::
class ArrayOfEnum(ARRAY):
def bind_expression(self, bindvalue):
return sa.cast(bindvalue, self)
def result_processor(self, dialect, coltype):
super_rp = super(ArrayOfEnum, self).result_processor(
dialect, coltype)
def handle_raw_string(value):
inner = re.match(r"^{(.*)}$", value).group(1)
return inner.split(",") if inner else []
def process(value):
if value is None:
return None
return super_rp(handle_raw_string(value))
return process
E.g.::
Table(
'mydata', metadata,
Column('id', Integer, primary_key=True),
Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
)
This type is not included as a built-in type as it would be incompatible
with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
a new version.
"""
from collections import defaultdict
import re
import datetime as dt
from ... import sql, schema, exc, util
from ...engine import default, reflection
from ...sql import compiler, expression
from ... import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
CHAR, TEXT, FLOAT, NUMERIC, \
DATE, BOOLEAN, REAL
RESERVED_WORDS = set(
["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
"asymmetric", "both", "case", "cast", "check", "collate", "column",
"constraint", "create", "current_catalog", "current_date",
"current_role", "current_time", "current_timestamp", "current_user",
"default", "deferrable", "desc", "distinct", "do", "else", "end",
"except", "false", "fetch", "for", "foreign", "from", "grant", "group",
"having", "in", "initially", "intersect", "into", "leading", "limit",
"localtime", "localtimestamp", "new", "not", "null", "of", "off",
"offset", "old", "on", "only", "or", "order", "placing", "primary",
"references", "returning", "select", "session_user", "some", "symmetric",
"table", "then", "to", "trailing", "true", "union", "unique", "user",
"using", "variadic", "when", "where", "window", "with", "authorization",
"between", "binary", "cross", "current_schema", "freeze", "full",
"ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
"notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
])
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
class BYTEA(sqltypes.LargeBinary):
__visit_name__ = 'BYTEA'
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = 'DOUBLE_PRECISION'
class INET(sqltypes.TypeEngine):
__visit_name__ = "INET"
PGInet = INET
class CIDR(sqltypes.TypeEngine):
__visit_name__ = "CIDR"
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
__visit_name__ = "MACADDR"
PGMacAddr = MACADDR
class OID(sqltypes.TypeEngine):
"""Provide the Postgresql OID type.
.. versionadded:: 0.9.5
"""
__visit_name__ = "OID"
class TIMESTAMP(sqltypes.TIMESTAMP):
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.TypeEngine):
"""Postgresql INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000 or zxjdbc.
"""
__visit_name__ = 'INTERVAL'
def __init__(self, precision=None):
self.precision = precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(precision=interval.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
@property
def python_type(self):
return dt.timedelta
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
def __init__(self, length=None, varying=False):
if not varying:
# BIT without VARYING defaults to length 1
self.length = length or 1
else:
# but BIT VARYING can be unlimited-length, so no default
self.length = length
self.varying = varying
PGBit = BIT
class UUID(sqltypes.TypeEngine):
"""Postgresql UUID type.
Represents the UUID column type, interpreting
data either as natively returned by the DBAPI
or as Python uuid objects.
The UUID type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
"""
__visit_name__ = 'UUID'
def __init__(self, as_uuid=False):
"""Construct a UUID type.
:param as_uuid=False: if True, values will be interpreted
as Python uuid objects, converting to/from string via the
DBAPI.
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
"This version of Python does not support "
"the native UUID type."
)
self.as_uuid = as_uuid
def bind_processor(self, dialect):
if self.as_uuid:
def process(value):
if value is not None:
value = util.text_type(value)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
if self.as_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
else:
return None
PGUuid = UUID
class TSVECTOR(sqltypes.TypeEngine):
"""The :class:`.postgresql.TSVECTOR` type implements the Postgresql
text search type TSVECTOR.
It can be used to do full text queries on natural language
documents.
.. versionadded:: 0.9.0
.. seealso::
:ref:`postgresql_match`
"""
__visit_name__ = 'TSVECTOR'
class ENUM(sqltypes.Enum):
"""Postgresql ENUM type.
This is a subclass of :class:`.types.Enum` which includes
support for PG's ``CREATE TYPE`` and ``DROP TYPE``.
When the builtin type :class:`.types.Enum` is used and the
:paramref:`.Enum.native_enum` flag is left at its default of
True, the Postgresql backend will use a :class:`.postgresql.ENUM`
type as the implementation, so the special create/drop rules
will be used.
The create/drop behavior of ENUM is necessarily intricate, due to the
awkward relationship the ENUM type has in relationship to the
parent table, in that it may be "owned" by just a single table, or
may be shared among many tables.
When using :class:`.types.Enum` or :class:`.postgresql.ENUM`
in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted
corresponding to when the :meth:`.Table.create` and :meth:`.Table.drop`
methods are called::
table = Table('sometable', metadata,
Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
)
table.create(engine) # will emit CREATE ENUM and CREATE TABLE
table.drop(engine) # will emit DROP TABLE and DROP ENUM
To use a common enumerated type between multiple tables, the best
practice is to declare the :class:`.types.Enum` or
:class:`.postgresql.ENUM` independently, and associate it with the
:class:`.MetaData` object itself::
my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
t1 = Table('sometable_one', metadata,
Column('some_enum', myenum)
)
t2 = Table('sometable_two', metadata,
Column('some_enum', myenum)
)
When this pattern is used, care must still be taken at the level
of individual table creates. Emitting CREATE TABLE without also
specifying ``checkfirst=True`` will still cause issues::
t1.create(engine) # will fail: no such type 'myenum'
If we specify ``checkfirst=True``, the individual table-level create
operation will check for the ``ENUM`` and create if not exists::
# will check if enum exists, and emit CREATE TYPE if not
t1.create(engine, checkfirst=True)
When using a metadata-level ENUM type, the type will always be created
and dropped if either the metadata-wide create/drop is called::
metadata.create_all(engine) # will emit CREATE TYPE
metadata.drop_all(engine) # will emit DROP TYPE
The type can also be created and dropped directly::
my_enum.create(engine)
my_enum.drop(engine)
.. versionchanged:: 1.0.0 The Postgresql :class:`.postgresql.ENUM` type
now behaves more strictly with regards to CREATE/DROP. A metadata-level
ENUM type will only be created and dropped at the metadata level,
not the table level, with the exception of
``table.create(checkfirst=True)``.
The ``table.drop()`` call will now emit a DROP TYPE for a table-level
enumerated type.
"""
def __init__(self, *enums, **kw):
"""Construct an :class:`~.postgresql.ENUM`.
Arguments are the same as that of
:class:`.types.Enum`, but also including
the following parameters.
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
table is being created; and additionally
that ``DROP TYPE`` is called when the table
is dropped. When ``False``, no check
will be performed and no ``CREATE TYPE``
or ``DROP TYPE`` is emitted, unless
:meth:`~.postgresql.ENUM.create`
or :meth:`~.postgresql.ENUM.drop`
are called directly.
Setting to ``False`` is helpful
when invoking a creation scheme to a SQL file
without access to the actual database -
the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods can
be used to emit SQL to a target bind.
.. versionadded:: 0.7.4
"""
self.create_type = kw.pop("create_type", True)
super(ENUM, self).__init__(*enums, **kw)
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql CREATE TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
not bind.dialect.has_type(
bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql DROP TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
"""Look in the 'ddl runner' for 'memos', then
note our name in that collection.
This to ensure a particular named enum is operated
upon only once within any kind of create/drop
sequence without relying upon "checkfirst".
"""
if not self.create_type:
return True
if '_ddl_runner' in kw:
ddl_runner = kw['_ddl_runner']
if '_pg_enums' in ddl_runner.memo:
pg_enums = ddl_runner.memo['_pg_enums']
else:
pg_enums = ddl_runner.memo['_pg_enums'] = set()
present = self.name in pg_enums
pg_enums.add(self.name)
return present
else:
return False
def _on_table_create(self, target, bind, checkfirst, **kw):
if checkfirst or (
not self.metadata and
not kw.get('_is_metadata_operation', False)) and \
not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
def _on_table_drop(self, target, bind, checkfirst, **kw):
if not self.metadata and \
not kw.get('_is_metadata_operation', False) and \
not self._check_for_name_in_memos(checkfirst, kw):
self.drop(bind=bind, checkfirst=checkfirst)
def _on_metadata_create(self, target, bind, checkfirst, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
def _on_metadata_drop(self, target, bind, checkfirst, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.drop(bind=bind, checkfirst=checkfirst)
colspecs = {
sqltypes.Interval: INTERVAL,
sqltypes.Enum: ENUM,
}
ischema_names = {
'integer': INTEGER,
'bigint': BIGINT,
'smallint': SMALLINT,
'character varying': VARCHAR,
'character': CHAR,
'"char"': sqltypes.String,
'name': sqltypes.String,
'text': TEXT,
'numeric': NUMERIC,
'float': FLOAT,
'real': REAL,
'inet': INET,
'cidr': CIDR,
'uuid': UUID,
'bit': BIT,
'bit varying': BIT,
'macaddr': MACADDR,
'oid': OID,
'double precision': DOUBLE_PRECISION,
'timestamp': TIMESTAMP,
'timestamp with time zone': TIMESTAMP,
'timestamp without time zone': TIMESTAMP,
'time with time zone': TIME,
'time without time zone': TIME,
'date': DATE,
'time': TIME,
'bytea': BYTEA,
'boolean': BOOLEAN,
'interval': INTERVAL,
'interval year to month': INTERVAL,
'interval day to second': INTERVAL,
'tsvector': TSVECTOR
}
class PGCompiler(compiler.SQLCompiler):
def visit_array(self, element, **kw):
return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
def visit_slice(self, element, **kw):
return "%s:%s" % (
self.process(element.start, **kw),
self.process(element.stop, **kw),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
kw['eager_grouping'] = True
return self._generate_generic_binary(
binary, " -> ", **kw
)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
kw['eager_grouping'] = True
return self._generate_generic_binary(
binary, " #> ", **kw
)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_aggregate_order_by(self, element, **kw):
return "%s ORDER BY %s" % (
self.process(element.target, **kw),
self.process(element.order_by, **kw)
)
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
binary.modifiers['postgresql_regconfig'],
sqltypes.STRINGTYPE)
if regconfig:
return "%s @@ to_tsquery(%s, %s)" % (
self.process(binary.left, **kw),
regconfig,
self.process(binary.right, **kw)
)
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s ILIKE %s' % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT ILIKE %s' % \
(self.process(binary.left, **kw),
self.process(binary.right, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def render_literal_value(self, value, type_):
value = super(PGCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
value = value.replace('\\', '\\\\')
return value
def visit_sequence(self, seq):
return "nextval('%s')" % self.preparer.format_sequence(seq)
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += " \n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += " \n LIMIT ALL"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
def format_from_hint_text(self, sqltext, table, hint, iscrud):
if hint.upper() != 'ONLY':
raise exc.CompileError("Unrecognized hint: %r" % hint)
return "ONLY " + sqltext
def get_select_precolumns(self, select, **kw):
if select._distinct is not False:
if select._distinct is True:
return "DISTINCT "
elif isinstance(select._distinct, (list, tuple)):
return "DISTINCT ON (" + ', '.join(
[self.process(col) for col in select._distinct]
) + ") "
else:
return "DISTINCT ON (" + \
self.process(select._distinct, **kw) + ") "
else:
return ""
def for_update_clause(self, select, **kw):
if select._for_update_arg.read:
if select._for_update_arg.key_share:
tmp = " FOR KEY SHARE"
else:
tmp = " FOR SHARE"
elif select._for_update_arg.key_share:
tmp = " FOR NO KEY UPDATE"
else:
tmp = " FOR UPDATE"
if select._for_update_arg.of:
tables = util.OrderedSet(
c.table if isinstance(c, expression.ColumnClause)
else c for c in select._for_update_arg.of)
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
if select._for_update_arg.nowait:
tmp += " NOWAIT"
if select._for_update_arg.skip_locked:
tmp += " SKIP LOCKED"
return tmp
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
start = self.process(func.clauses.clauses[1], **kw)
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2], **kw)
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
target_text = 'ON CONSTRAINT %s' % clause.constraint_target
elif clause.inferred_target_elements is not None:
target_text = '(%s)' % ', '.join(
(self.preparer.quote(c)
if isinstance(c, util.string_types)
else
self.process(c, include_table=False, use_schema=False))
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
target_text += ' WHERE %s' % \
self.process(
clause.inferred_target_whereclause,
use_schema=False
)
else:
target_text = ''
return target_text
def visit_on_conflict_do_nothing(self, on_conflict, **kw):
target_text = self._on_conflict_target(on_conflict, **kw)
if target_text:
return "ON CONFLICT %s DO NOTHING" % target_text
else:
return "ON CONFLICT DO NOTHING"
def visit_on_conflict_do_update(self, on_conflict, **kw):
clause = on_conflict
target_text = self._on_conflict_target(on_conflict, **kw)
action_set_ops = []
for k, v in clause.update_values_to_set:
key_text = (
self.preparer.quote(k)
if isinstance(k, util.string_types)
else self.process(k, use_schema=False)
)
value_text = self.process(
v,
use_schema=False
)
action_set_ops.append('%s = %s' % (key_text, value_text))
action_text = ', '.join(action_set_ops)
if clause.update_whereclause is not None:
action_text += ' WHERE %s' % \
self.process(
clause.update_whereclause,
include_table=True,
use_schema=False
)
return 'ON CONFLICT %s DO UPDATE SET %s' % (target_text, action_text)
class PGDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
impl_type = column.type.dialect_impl(self.dialect)
if isinstance(impl_type, sqltypes.TypeDecorator):
impl_type = impl_type.impl
if column.primary_key and \
column is column.table._autoincrement_column and \
(
self.dialect.supports_smallserial or
not isinstance(impl_type, sqltypes.SmallInteger)
) and (
column.default is None or
(
isinstance(column.default, schema.Sequence) and
column.default.optional
)):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
elif isinstance(impl_type, sqltypes.SmallInteger):
colspec += " SMALLSERIAL"
else:
colspec += " SERIAL"
else:
colspec += " " + self.dialect.type_compiler.process(
column.type, type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def visit_create_enum_type(self, create):
type_ = create.element
return "CREATE TYPE %s AS ENUM (%s)" % (
self.preparer.format_type(type_),
", ".join(
self.sql_compiler.process(sql.literal(e), literal_binds=True)
for e in type_.enums)
)
def visit_drop_enum_type(self, drop):
type_ = drop.element
return "DROP TYPE %s" % (
self.preparer.format_type(type_)
)
def visit_create_index(self, create):
preparer = self.preparer
index = create.element
self._verify_index_table(index)
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX "
if self.dialect._supports_create_index_concurrently:
concurrently = index.dialect_options['postgresql']['concurrently']
if concurrently:
text += "CONCURRENTLY "
text += "%s ON %s " % (
self._prepared_index_name(index,
include_schema=False),
preparer.format_table(index.table)
)
using = index.dialect_options['postgresql']['using']
if using:
text += "USING %s " % preparer.quote(using)
ops = index.dialect_options["postgresql"]["ops"]
text += "(%s)" \
% (
', '.join([
self.sql_compiler.process(
expr.self_group()
if not isinstance(expr, expression.ColumnClause)
else expr,
include_table=False, literal_binds=True) +
(
(' ' + ops[expr.key])
if hasattr(expr, 'key')
and expr.key in ops else ''
)
for expr in index.expressions
])
)
withclause = index.dialect_options['postgresql']['with']
if withclause:
text += " WITH (%s)" % (', '.join(
['%s = %s' % storage_parameter
for storage_parameter in withclause.items()]))
tablespace_name = index.dialect_options['postgresql']['tablespace']
if tablespace_name:
text += " TABLESPACE %s" % preparer.quote(tablespace_name)
whereclause = index.dialect_options["postgresql"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False,
literal_binds=True)
text += " WHERE " + where_compiled
return text
def visit_drop_index(self, drop):
index = drop.element
text = "\nDROP INDEX "
if self.dialect._supports_drop_index_concurrently:
concurrently = index.dialect_options['postgresql']['concurrently']
if concurrently:
text += "CONCURRENTLY "
text += self._prepared_index_name(index, include_schema=True)
return text
def visit_exclude_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
elements = []
for expr, name, op in constraint._render_exprs:
kw['include_table'] = False
elements.append(
"%s WITH %s" % (self.sql_compiler.process(expr, **kw), op)
)
text += "EXCLUDE USING %s (%s)" % (constraint.using,
', '.join(elements))
if constraint.where is not None:
text += ' WHERE (%s)' % self.sql_compiler.process(
constraint.where,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def post_create_table(self, table):
table_opts = []
pg_opts = table.dialect_options['postgresql']
inherits = pg_opts.get('inherits')
if inherits is not None:
if not isinstance(inherits, (list, tuple)):
inherits = (inherits, )
table_opts.append(
'\n INHERITS ( ' +
', '.join(self.preparer.quote(name) for name in inherits) +
' )')
if pg_opts['with_oids'] is True:
table_opts.append('\n WITH OIDS')
elif pg_opts['with_oids'] is False:
table_opts.append('\n WITHOUT OIDS')
if pg_opts['on_commit']:
on_commit_options = pg_opts['on_commit'].replace("_", " ").upper()
table_opts.append('\n ON COMMIT %s' % on_commit_options)
if pg_opts['tablespace']:
tablespace_name = pg_opts['tablespace']
table_opts.append(
'\n TABLESPACE %s' % self.preparer.quote(tablespace_name)
)
return ''.join(table_opts)
class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_TSVECTOR(self, type, **kw):
return "TSVECTOR"
def visit_INET(self, type_, **kw):
return "INET"
def visit_CIDR(self, type_, **kw):
return "CIDR"
def visit_MACADDR(self, type_, **kw):
return "MACADDR"
def visit_OID(self, type_, **kw):
return "OID"
def visit_FLOAT(self, type_, **kw):
if not type_.precision:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': type_.precision}
def visit_DOUBLE_PRECISION(self, type_, **kw):
return "DOUBLE PRECISION"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_HSTORE(self, type_, **kw):
return "HSTORE"
def visit_JSON(self, type_, **kw):
return "JSON"
def visit_JSONB(self, type_, **kw):
return "JSONB"
def visit_INT4RANGE(self, type_, **kw):
return "INT4RANGE"
def visit_INT8RANGE(self, type_, **kw):
return "INT8RANGE"
def visit_NUMRANGE(self, type_, **kw):
return "NUMRANGE"
def visit_DATERANGE(self, type_, **kw):
return "DATERANGE"
def visit_TSRANGE(self, type_, **kw):
return "TSRANGE"
def visit_TSTZRANGE(self, type_, **kw):
return "TSTZRANGE"
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_enum(self, type_, **kw):
if not type_.native_enum or not self.dialect.supports_native_enum:
return super(PGTypeCompiler, self).visit_enum(type_, **kw)
else:
return self.visit_ENUM(type_, **kw)
def visit_ENUM(self, type_, **kw):
return self.dialect.identifier_preparer.format_type(type_)
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_TIME(self, type_, **kw):
return "TIME%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_INTERVAL(self, type_, **kw):
if type_.precision is not None:
return "INTERVAL(%d)" % type_.precision
else:
return "INTERVAL"
def visit_BIT(self, type_, **kw):
if type_.varying:
compiled = "BIT VARYING"
if type_.length is not None:
compiled += "(%d)" % type_.length
else:
compiled = "BIT(%d)" % type_.length
return compiled
def visit_UUID(self, type_, **kw):
return "UUID"
def visit_large_binary(self, type_, **kw):
return self.visit_BYTEA(type_, **kw)
def visit_BYTEA(self, type_, **kw):
return "BYTEA"
def visit_ARRAY(self, type_, **kw):
return self.process(type_.item_type) + ('[]' * (type_.dimensions
if type_.dimensions
is not None else 1))
class PGIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].\
replace(self.escape_to_quote, self.escape_quote)
return value
def format_type(self, type_, use_schema=True):
if not type_.name:
raise exc.CompileError("Postgresql ENUM type requires a name.")
name = self.quote(type_.name)
effective_schema = self.schema_for_object(type_)
if not self.omit_schema and use_schema and \
effective_schema is not None:
name = self.quote_schema(effective_schema) + "." + name
return name
class PGInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_oid(self, table_name, schema=None):
"""Return the OID for the given table name."""
return self.dialect.get_table_oid(self.bind, table_name, schema,
info_cache=self.info_cache)
def get_enums(self, schema=None):
"""Return a list of ENUM objects.
Each member is a dictionary containing these fields:
* name - name of the enum
* schema - the schema name for the enum.
* visible - boolean, whether or not this enum is visible
in the default search path.
* labels - a list of string labels that apply to the enum.
:param schema: schema name. If None, the default schema
(typically 'public') is used. May also be set to '*' to
indicate load enums for all schemas.
.. versionadded:: 1.0.0
"""
schema = schema or self.default_schema_name
return self.dialect._load_enums(self.bind, schema)
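    # Hedged usage sketch (not from the original source): given a PGInspector
    # ``insp`` bound to a PostgreSQL engine,
    #
    #     for enum in insp.get_enums(schema='*'):
    #         print(enum['schema'], enum['name'], enum['labels'])
    #
    # iterates over every ENUM type defined in any schema.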
def get_foreign_table_names(self, schema=None):
"""Return a list of FOREIGN TABLE names.
Behavior is similar to that of :meth:`.Inspector.get_table_names`,
        except that the list is limited to those tables that report a
``relkind`` value of ``f``.
.. versionadded:: 1.0.0
"""
schema = schema or self.default_schema_name
return self.dialect._get_foreign_table_names(self.bind, schema)
def get_view_names(self, schema=None, include=('plain', 'materialized')):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
For special quoting, use :class:`.quoted_name`.
:param include: specify which types of views to return. Passed
as a string value (for a single type) or a tuple (for any number
of types). Defaults to ``('plain', 'materialized')``.
.. versionadded:: 1.1
"""
return self.dialect.get_view_names(self.bind, schema,
info_cache=self.info_cache,
include=include)
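    # Hedged usage sketch (not from the original source): given a PGInspector
    # ``insp`` bound to a PostgreSQL engine,
    # ``insp.get_view_names(schema='public', include='materialized')``
    # restricts the result to materialized views, while the default
    # include=('plain', 'materialized') returns both kinds.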
class CreateEnumType(schema._CreateDropBase):
__visit_name__ = "create_enum_type"
class DropEnumType(schema._CreateDropBase):
__visit_name__ = "drop_enum_type"
class PGExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar((
"select nextval('%s')" %
self.dialect.identifier_preparer.format_sequence(seq)), type_)
def get_insert_default(self, column):
if column.primary_key and \
column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar("select %s" %
column.server_default.arg,
column.type)
elif (column.default is None or
(column.default.is_sequence and
column.default.optional)):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
# generates server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
tab = column.table.name
col = column.name
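                    # Truncate the table and column parts so the derived
                    # "<table>_<col>_seq" name fits PostgreSQL's 63-character
                    # identifier limit, mirroring how SERIAL names its
                    # implicit sequence.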
tab = tab[0:29 + max(0, (29 - len(col)))]
col = col[0:29 + max(0, (29 - len(tab)))]
name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
if column.table is not None:
effective_schema = self.connection.schema_for_object(
column.table)
else:
effective_schema = None
if effective_schema is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
(effective_schema, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
(seq_name, )
return self._execute_scalar(exc, column.type)
return super(PGExecutionContext, self).get_insert_default(column)
class PGDialect(default.DefaultDialect):
name = 'postgresql'
supports_alter = True
max_identifier_length = 63
supports_sane_rowcount = True
supports_native_enum = True
supports_native_boolean = True
supports_smallserial = True
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = True
postfetch_lastrowid = False
supports_default_values = True
supports_empty_insert = False
supports_multivalues_insert = True
default_paramstyle = 'pyformat'
ischema_names = ischema_names
colspecs = colspecs
statement_compiler = PGCompiler
ddl_compiler = PGDDLCompiler
type_compiler = PGTypeCompiler
preparer = PGIdentifierPreparer
execution_ctx_cls = PGExecutionContext
inspector = PGInspector
isolation_level = None
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": {},
"concurrently": False,
"with": {},
"tablespace": None
}),
(schema.Table, {
"ignore_search_path": False,
"tablespace": None,
"with_oids": None,
"on_commit": None,
"inherits": None
}),
]
reflection_options = ('postgresql_ignore_search_path', )
_backslash_escapes = True
_supports_create_index_concurrently = True
_supports_drop_index_concurrently = True
def __init__(self, isolation_level=None, json_serializer=None,
json_deserializer=None, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
self._json_serializer = json_serializer
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (8, 2) and \
self.__dict__.get('implicit_returning', True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
# pop base Enum type
self.colspecs.pop(sqltypes.Enum, None)
# psycopg2, others may have placed ENUM here as well
self.colspecs.pop(ENUM, None)
# http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
self.supports_smallserial = self.server_version_info >= (9, 2)
self._backslash_escapes = self.server_version_info < (8, 2) or \
connection.scalar(
"show standard_conforming_strings"
) == 'off'
self._supports_create_index_concurrently = \
self.server_version_info >= (8, 2)
self._supports_drop_index_concurrently = \
self.server_version_info >= (9, 2)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
"ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute('show transaction isolation level')
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
def do_begin_twophase(self, connection, xid):
self.do_begin(connection.connection)
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
# FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions
# Must find out a way how to make the dbapi not
# open a transaction.
connection.execute("ROLLBACK")
connection.execute("ROLLBACK PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
connection.execute("COMMIT PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(
sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_schema(self, connection, schema):
query = ("select nspname from pg_namespace "
"where lower(nspname)=:schema")
cursor = connection.execute(
sql.text(
query,
bindparams=[
sql.bindparam(
'schema', util.text_type(schema.lower()),
type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where "
"pg_catalog.pg_table_is_visible(c.oid) "
"and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(table_name),
type_=sqltypes.Unicode)]
)
)
else:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=:schema and "
"relname=:name",
bindparams=[
sql.bindparam('name',
util.text_type(table_name),
type_=sqltypes.Unicode),
sql.bindparam('schema',
util.text_type(schema),
type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
def has_sequence(self, connection, sequence_name, schema=None):
if schema is None:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=current_schema() "
"and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(sequence_name),
type_=sqltypes.Unicode)
]
)
)
else:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=:schema and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(sequence_name),
type_=sqltypes.Unicode),
sql.bindparam('schema',
util.text_type(schema),
type_=sqltypes.Unicode)
]
)
)
return bool(cursor.first())
def has_type(self, connection, type_name, schema=None):
if schema is not None:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
WHERE t.typnamespace = n.oid
AND t.typname = :typname
AND n.nspname = :nspname
)
"""
query = sql.text(query)
else:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t
WHERE t.typname = :typname
AND pg_type_is_visible(t.oid)
)
"""
query = sql.text(query)
query = query.bindparams(
sql.bindparam('typname',
util.text_type(type_name), type_=sqltypes.Unicode),
)
if schema is not None:
query = query.bindparams(
sql.bindparam('nspname',
util.text_type(schema), type_=sqltypes.Unicode),
)
cursor = connection.execute(query)
return bool(cursor.scalar())
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match(
'.*(?:PostgreSQL|EnterpriseDB) '
'(\d+)\.(\d+)(?:\.(\d+))?(?:\.\d+)?(?:devel)?',
v)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % v)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
def get_table_oid(self, connection, table_name, schema=None, **kw):
"""Fetch the oid for schema.table_name.
Several reflection methods require the table oid. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_oid = None
if schema is not None:
schema_where_clause = "n.nspname = :schema"
else:
schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
query = """
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (%s)
AND c.relname = :table_name AND c.relkind in ('r', 'v', 'm', 'f')
""" % schema_where_clause
# Since we're binding to unicode, table_name and schema_name must be
# unicode.
table_name = util.text_type(table_name)
if schema is not None:
schema = util.text_type(schema)
s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
s = s.columns(oid=sqltypes.Integer)
if schema:
s = s.bindparams(sql.bindparam('schema', type_=sqltypes.Unicode))
c = connection.execute(s, table_name=table_name, schema=schema)
table_oid = c.scalar()
if table_oid is None:
raise exc.NoSuchTableError(table_name)
return table_oid
@reflection.cache
def get_schema_names(self, connection, **kw):
result = connection.execute(
sql.text("SELECT nspname FROM pg_namespace "
"WHERE nspname NOT LIKE 'pg_%' "
"ORDER BY nspname"
).columns(nspname=sqltypes.Unicode))
return [name for name, in result]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
result = connection.execute(
sql.text("SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind = 'r'"
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name)
return [name for name, in result]
@reflection.cache
def _get_foreign_table_names(self, connection, schema=None, **kw):
result = connection.execute(
sql.text("SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind = 'f'"
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name)
return [name for name, in result]
@reflection.cache
def get_view_names(
self, connection, schema=None,
include=('plain', 'materialized'), **kw):
include_kind = {'plain': 'v', 'materialized': 'm'}
try:
kinds = [include_kind[i] for i in util.to_list(include)]
except KeyError:
raise ValueError(
"include %r unknown, needs to be a sequence containing "
"one or both of 'plain' and 'materialized'" % (include,))
if not kinds:
raise ValueError(
"empty include, needs to be a sequence containing "
"one or both of 'plain' and 'materialized'")
result = connection.execute(
sql.text("SELECT c.relname FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relkind IN (%s)" %
(", ".join("'%s'" % elem for elem in kinds))
).columns(relname=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name)
return [name for name, in result]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
view_def = connection.scalar(
sql.text("SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c "
"JOIN pg_namespace n ON n.oid = c.relnamespace "
"WHERE n.nspname = :schema AND c.relname = :view_name "
"AND c.relkind IN ('v', 'm')"
).columns(view_def=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
view_name=view_name)
return view_def
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
SQL_COLS = """
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT pg_catalog.pg_get_expr(d.adbin, d.adrelid)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
AND a.atthasdef)
AS DEFAULT,
a.attnotnull, a.attnum, a.attrelid as table_oid
FROM pg_catalog.pg_attribute a
WHERE a.attrelid = :table_oid
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
s = sql.text(SQL_COLS,
bindparams=[
sql.bindparam('table_oid', type_=sqltypes.Integer)],
typemap={
'attname': sqltypes.Unicode,
'default': sqltypes.Unicode}
)
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
domains = self._load_domains(connection)
enums = dict(
(
"%s.%s" % (rec['schema'], rec['name'])
if not rec['visible'] else rec['name'], rec) for rec in
self._load_enums(connection, schema='*')
)
# format columns
columns = []
for name, format_type, default, notnull, attnum, table_oid in rows:
column_info = self._get_column_info(
name, format_type, default, notnull, domains, enums, schema)
columns.append(column_info)
return columns
def _get_column_info(self, name, format_type, default,
notnull, domains, enums, schema):
# strip (*) from character varying(5), timestamp(5)
# with time zone, geometry(POLYGON), etc.
attype = re.sub(r'\(.*\)', '', format_type)
# strip '[]' from integer[], etc.
attype = attype.replace('[]', '')
nullable = not notnull
is_array = format_type.endswith('[]')
charlen = re.search('\(([\d,]+)\)', format_type)
if charlen:
charlen = charlen.group(1)
args = re.search('\((.*)\)', format_type)
if args and args.group(1):
args = tuple(re.split('\s*,\s*', args.group(1)))
else:
args = ()
kwargs = {}
if attype == 'numeric':
if charlen:
prec, scale = charlen.split(',')
args = (int(prec), int(scale))
else:
args = ()
elif attype == 'double precision':
args = (53, )
elif attype == 'integer':
args = ()
elif attype in ('timestamp with time zone',
'time with time zone'):
kwargs['timezone'] = True
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype in ('timestamp without time zone',
'time without time zone', 'time'):
kwargs['timezone'] = False
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype == 'bit varying':
kwargs['varying'] = True
if charlen:
args = (int(charlen),)
else:
args = ()
elif attype in ('interval', 'interval year to month',
'interval day to second'):
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif charlen:
args = (int(charlen),)
while True:
if attype in self.ischema_names:
coltype = self.ischema_names[attype]
break
elif attype in enums:
enum = enums[attype]
coltype = ENUM
kwargs['name'] = enum['name']
if not enum['visible']:
kwargs['schema'] = enum['schema']
args = tuple(enum['labels'])
break
elif attype in domains:
domain = domains[attype]
attype = domain['attype']
# A table can't override whether the domain is nullable.
nullable = domain['nullable']
if domain['default'] and not default:
# It can, however, override the default
# value, but can't set it to null.
default = domain['default']
continue
else:
coltype = None
break
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
coltype = self.ischema_names['_array'](coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(attype, name))
coltype = sqltypes.NULLTYPE
# adjust the default value
autoincrement = False
if default is not None:
match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
if match is not None:
autoincrement = True
# the default is related to a Sequence
sch = schema
if '.' not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# "quote schema"
default = match.group(1) + \
('"%s"' % sch) + '.' + \
match.group(2) + match.group(3)
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
if self.server_version_info < (8, 4):
PK_SQL = """
SELECT a.attname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_attribute a
on t.oid=a.attrelid AND %s
WHERE
t.oid = :table_oid and ix.indisprimary = 't'
ORDER BY a.attnum
""" % self._pg_index_any("a.attnum", "ix.indkey")
else:
# unnest() and generate_subscripts() both introduced in
# version 8.4
PK_SQL = """
SELECT a.attname
FROM pg_attribute a JOIN (
SELECT unnest(ix.indkey) attnum,
generate_subscripts(ix.indkey, 1) ord
FROM pg_index ix
WHERE ix.indrelid = :table_oid AND ix.indisprimary
) k ON a.attnum=k.attnum
WHERE a.attrelid = :table_oid
ORDER BY k.ord
"""
t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
cols = [r[0] for r in c.fetchall()]
PK_CONS_SQL = """
SELECT conname
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table_oid AND r.contype = 'p'
ORDER BY 1
"""
t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
name = c.scalar()
return {'constrained_columns': cols, 'name': name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None,
postgresql_ignore_search_path=False, **kw):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
FK_SQL = """
SELECT r.conname,
pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
n.nspname as conschema
FROM pg_catalog.pg_constraint r,
pg_namespace n,
pg_class c
WHERE r.conrelid = :table AND
r.contype = 'f' AND
c.oid = confrelid AND
n.oid = c.relnamespace
ORDER BY 1
"""
# http://www.postgresql.org/docs/9.0/static/sql-createtable.html
FK_REGEX = re.compile(
r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)'
r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?'
r'[\s]?(ON UPDATE '
r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
r'[\s]?(ON DELETE '
r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?'
r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?'
)
t = sql.text(FK_SQL, typemap={
'conname': sqltypes.Unicode,
'condef': sqltypes.Unicode})
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search(FK_REGEX, condef).groups()
constrained_columns, referred_schema, \
referred_table, referred_columns, \
_, match, _, onupdate, _, ondelete, \
deferrable, _, initially = m
if deferrable is not None:
deferrable = True if deferrable == 'DEFERRABLE' else False
constrained_columns = [preparer._unquote_identifier(x)
for x in re.split(
r'\s*,\s*', constrained_columns)]
if postgresql_ignore_search_path:
# when ignoring search path, we use the actual schema
# provided it isn't the "default" schema
if conschema != self.default_schema_name:
referred_schema = conschema
else:
referred_schema = schema
elif referred_schema:
# referred_schema is the schema that we regexp'ed from
# pg_get_constraintdef(). If the schema is in the search
# path, pg_get_constraintdef() will give us None.
referred_schema = \
preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# If the actual schema matches the schema of the table
# we're reflecting, then we will use that.
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
for x in
                                re.split(r'\s*,\s*', referred_columns)]
fkey_d = {
'name': conname,
'constrained_columns': constrained_columns,
'referred_schema': referred_schema,
'referred_table': referred_table,
'referred_columns': referred_columns,
'options': {
'onupdate': onupdate,
'ondelete': ondelete,
'deferrable': deferrable,
'initially': initially,
'match': match
}
}
fkeys.append(fkey_d)
return fkeys
def _pg_index_any(self, col, compare_to):
if self.server_version_info < (8, 1):
# http://www.postgresql.org/message-id/[email protected]
# "In CVS tip you could replace this with "attnum = ANY (indkey)".
# Unfortunately, most array support doesn't work on int2vector in
# pre-8.1 releases, so I think you're kinda stuck with the above
# for now.
# regards, tom lane"
return "(%s)" % " OR ".join(
"%s[%d] = %s" % (compare_to, ind, col)
for ind in range(0, 10)
)
else:
return "%s = ANY(%s)" % (col, compare_to)
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
# cast indkey as varchar since it's an int2vector,
# returned as a list by some drivers such as pypostgresql
if self.server_version_info < (8, 5):
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, NULL, ix.indkey%s,
%s, am.amname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid = ix.indexrelid
left outer join
pg_attribute a
on t.oid = a.attrelid and %s
left outer join
pg_am am
on i.relam = am.oid
WHERE
t.relkind IN ('r', 'v', 'f', 'm')
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
""" % (
# version 8.3 here was based on observing the
# cast does not work in PG 8.2.4, does work in 8.3.0.
# nothing in PG changelogs regarding this.
"::varchar" if self.server_version_info >= (8, 3) else "",
"i.reloptions" if self.server_version_info >= (8, 2)
else "NULL",
self._pg_index_any("a.attnum", "ix.indkey")
)
else:
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname, a.attnum, c.conrelid, ix.indkey::varchar,
i.reloptions, am.amname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid = ix.indexrelid
left outer join
pg_attribute a
on t.oid = a.attrelid and a.attnum = ANY(ix.indkey)
left outer join
pg_constraint c
on (ix.indrelid = c.conrelid and
ix.indexrelid = c.conindid and
c.contype in ('p', 'u', 'x'))
left outer join
pg_am am
on i.relam = am.oid
WHERE
t.relkind IN ('r', 'v', 'f', 'm')
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
"""
t = sql.text(IDX_SQL, typemap={
'relname': sqltypes.Unicode,
'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
indexes = defaultdict(lambda: defaultdict(dict))
sv_idx_name = None
for row in c.fetchall():
(idx_name, unique, expr, prd, col,
col_num, conrelid, idx_key, options, amname) = row
if expr:
if idx_name != sv_idx_name:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s"
% idx_name)
sv_idx_name = idx_name
continue
            if prd and idx_name != sv_idx_name:
util.warn(
"Predicate of partial index %s ignored during reflection"
% idx_name)
sv_idx_name = idx_name
has_idx = idx_name in indexes
index = indexes[idx_name]
if col is not None:
index['cols'][col_num] = col
if not has_idx:
index['key'] = [int(k.strip()) for k in idx_key.split()]
index['unique'] = unique
if conrelid is not None:
index['duplicates_constraint'] = idx_name
if options:
index['options'] = dict(
[option.split("=") for option in options])
# it *might* be nice to include that this is 'btree' in the
# reflection info. But we don't want an Index object
# to have a ``postgresql_using`` in it that is just the
# default, so for the moment leaving this out.
if amname and amname != 'btree':
index['amname'] = amname
result = []
for name, idx in indexes.items():
entry = {
'name': name,
'unique': idx['unique'],
'column_names': [idx['cols'][i] for i in idx['key']]
}
if 'duplicates_constraint' in idx:
entry['duplicates_constraint'] = idx['duplicates_constraint']
if 'options' in idx:
entry.setdefault(
'dialect_options', {})["postgresql_with"] = idx['options']
if 'amname' in idx:
entry.setdefault(
'dialect_options', {})["postgresql_using"] = idx['amname']
result.append(entry)
return result
@reflection.cache
def get_unique_constraints(self, connection, table_name,
schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
UNIQUE_SQL = """
SELECT
cons.conname as name,
cons.conkey as key,
a.attnum as col_num,
a.attname as col_name
FROM
pg_catalog.pg_constraint cons
join pg_attribute a
on cons.conrelid = a.attrelid AND
a.attnum = ANY(cons.conkey)
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'u'
"""
t = sql.text(UNIQUE_SQL, typemap={'col_name': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
uniques = defaultdict(lambda: defaultdict(dict))
for row in c.fetchall():
uc = uniques[row.name]
uc["key"] = row.key
uc["cols"][row.col_num] = row.col_name
return [
{'name': name,
'column_names': [uc["cols"][i] for i in uc["key"]]}
for name, uc in uniques.items()
]
@reflection.cache
def get_check_constraints(
self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
CHECK_SQL = """
SELECT
cons.conname as name,
cons.consrc as src
FROM
pg_catalog.pg_constraint cons
WHERE
cons.conrelid = :table_oid AND
cons.contype = 'c'
"""
c = connection.execute(sql.text(CHECK_SQL), table_oid=table_oid)
return [
{'name': name,
'sqltext': src[1:-1]}
for name, src in c.fetchall()
]
def _load_enums(self, connection, schema=None):
schema = schema or self.default_schema_name
if not self.supports_native_enum:
return {}
# Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
-- t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema",
e.enumlabel as "label"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
WHERE t.typtype = 'e'
"""
if schema != '*':
SQL_ENUMS += "AND n.nspname = :schema "
# e.oid gives us label order within an enum
SQL_ENUMS += 'ORDER BY "schema", "name", e.oid'
s = sql.text(SQL_ENUMS, typemap={
'attname': sqltypes.Unicode,
'label': sqltypes.Unicode})
if schema != '*':
s = s.bindparams(schema=schema)
c = connection.execute(s)
enums = []
enum_by_name = {}
for enum in c.fetchall():
key = (enum['schema'], enum['name'])
if key in enum_by_name:
enum_by_name[key]['labels'].append(enum['label'])
else:
enum_by_name[key] = enum_rec = {
'name': enum['name'],
'schema': enum['schema'],
'visible': enum['visible'],
'labels': [enum['label']],
}
enums.append(enum_rec)
return enums
def _load_domains(self, connection):
# Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
not t.typnotnull as "nullable",
t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE t.typtype = 'd'
"""
s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode})
c = connection.execute(s)
domains = {}
for domain in c.fetchall():
# strip (30) from character varying(30)
attype = re.search('([^\(]+)', domain['attype']).group(1)
if domain['visible']:
# 'visible' just means whether or not the domain is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = domain['name']
else:
name = "%s.%s" % (domain['schema'], domain['name'])
domains[name] = {
'attype': attype,
'nullable': domain['nullable'],
'default': domain['default']
}
return domains
| apache-2.0 |
noijd/.dot | i3/shell.py | 2 | 2368 | from argparse import ArgumentParser
import os
import re
import i3ipc
def window_classes(window_id):
cmd = 'xprop -id '+ str(window_id)
properties = os.popen(cmd).read()
regex = re.compile(r'^WM_CLASS\(STRING\) = (.*)$', re.MULTILINE)
match = regex.search(properties).groups()
return match[0].replace('"', '').split(', ')
def real_pid_for_window(window_id, parent_id):
connection = i3ipc.Connection()
tree = connection.get_tree()
# Groups the windows with the expected parent PID
windows = []
for node in tree.leaves():
wid = node.window
# TODO regex
cmd = 'xprop -id '+ str(wid) +' | grep -m 1 PID | cut -d " " -f 3'
pid = os.popen(cmd).read().strip()
if parent_id == pid:
windows.append(wid)
children = os.popen('pgrep -P '+ parent_id).read().split()
# Without children it's not necessary to infer anything
if not len(children):
return parent_id
classes = window_classes(window_id)
if 'gnome-terminal' in classes:
# First one is gnome-pty-helper
sorted_children = children[1:]
else:
sorted_children = children
sorted_windows = sorted(windows)
children_wids = dict(zip(sorted_windows, sorted_children))
# TODO debug option
with open('/tmp/i3_py_shell', 'w') as log:
if not len(children):
log.write('CHILDREN: '+ str(children) +"\n")
log.write('Children: '+ str(children) +"\n")
log.write('Sorted children: '+ str(sorted_children) +"\n")
log.write('Windows: '+ str(windows) +"\n")
log.write('Sorted windows: '+ str(sorted_windows) +"\n")
for wid in children_wids.keys():
log.write(str(wid) +" ~> "+ str(children_wids[wid]) +"\n")
log.write("\n")
if 'gnome-terminal' in classes:
log.write("Gnome-terminal... for "+ str(window_id) +"\n")
else:
log.write("Classes: "+ str(classes) +"\n")
log.write(children_wids[window_id])
return children_wids[window_id]
if __name__ == '__main__':
# TODO instructions
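    # Hedged usage note (not from the original source; flags follow the parser
    # defined below), e.g.
    #   python shell.py --wid <X11 window id> --pid <terminal emulator pid>
    # prints the PID of the child process (typically the shell) that owns the
    # given window, as inferred by real_pid_for_window().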
parser = ArgumentParser(prog='current.py')
parser.add_argument('--pid', dest='pid')
parser.add_argument('--wid', dest='wid')
args = parser.parse_args()
real_pid = real_pid_for_window(int(args.wid), args.pid)
print(real_pid)
| mit |
chiefspace/udemy-rest-api | udemy_rest_api_section6/resources/item.py | 1 | 2149 | from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
from models.item import ItemModel
class Item(Resource):
parser = reqparse.RequestParser()
parser.add_argument('price',
type=float,
required=True,
help='This field cannot be left blank!'
)
parser.add_argument('store_id',
type=int,
required=True,
help='Every item needs a store id.'
)
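    # Note (added comment): the parser is defined once at class level so that
    # post() and put() share the same payload validation for 'price' and
    # 'store_id'.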
@jwt_required()
def get(self, name):
item = ItemModel.find_by_name(name)
if item:
return item.json()
return {'message': 'Item not found'}, 404
def post(self, name):
if ItemModel.find_by_name(name):
return {'message': "An item with name '{}' already exists.".format(name)}, 400
data = Item.parser.parse_args()
item = ItemModel(name, **data)
try:
item.save_to_db()
except:
return {"message": "An error occurred while inserting the item."}, 500
return item.json(), 201
def delete(self, name):
item = ItemModel.find_by_name(name)
if item:
item.delete_from_db()
return {'message': 'Item deleted'}
def put(self, name):
data = Item.parser.parse_args()
item = ItemModel.find_by_name(name)
if item is None:
item = ItemModel(name, **data)
else:
item.price = data['price']
item.save_to_db()
return item.json()
class ItemList(Resource):
def get(self):
return {'items': [item.json() for item in ItemModel.query.all()]} # a list comprehension is used here
# return {'items': list(map(lambda x: x.json(), ItemModel.query.all()))} # same results as above, but with a lambda instead
# the above statement is a mapping of functions to items or elements returned from the database
# the list comprehension method is more pythonic
        # so, only use the map with lambda if you are programming in other languages or working with other people programming in other languages | gpl-2.0 |
liulion/mayavi | mayavi/modules/ui/surface.py | 4 | 1631 | """
Traits View definition file.
The view trait of the parent class is extracted from the model definition
file. This file can either be exec()ed or imported. See
core/base.py:Base.trait_view() for what is currently used. Using exec()
allows view changes without needing to restart Mayavi, but is slower than
importing.
"""
# Authors: Prabhu Ramachandran <[email protected]>
# Judah De Paula <[email protected]>
# Copyright (c) 2005-2008, Enthought, Inc.
# License: BSD Style.
from traitsui.api import Item, Group, View, InstanceEditor
from mayavi.components.ui.actor import actor_view, texture_view
view = View(
Group(Item(name='enable_contours', label='Enable Contours'),
Group(Item(name='contour',
style='custom',
enabled_when='object.enable_contours'
),
show_labels=False,
),
show_labels=True,
label='Contours'
),
Group(Item('actor',
resizable=True,
style='custom',
editor=InstanceEditor(view=actor_view)
),
label='Actor',
show_labels=False,
),
Group(Item('actor',
resizable=True,
style='custom',
editor=InstanceEditor(view=texture_view)
),
label='Texturing',
show_labels=False,
),
)
| bsd-3-clause |
dariansk/python_training | fixture/db.py | 1 | 1487 | #import pymysql.cursors
import mysql.connector
from model.group import Group
from model.contact import Contact
class Dbfixture:
def __init__(self, host, name, user, password):
self.host = host
self.name = name
self.user = user
self.password = password
#self.connection = pymysql.connect(host=host, database=name, user=user, password=password)
self.connection = mysql.connector.connect(host=host, database=name, user=user, password=password)
self.connection.autocommit = True
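    # Hedged usage sketch (not from the original source; credentials are placeholders):
    #
    #     db = Dbfixture(host="127.0.0.1", name="addressbook", user="root", password="")
    #     groups = db.get_group_list()
    #     contacts = db.get_contact_list()
    #     db.destroy()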
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor:
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select id, firstname, lastname from addressbook where deprecated='0000-00-00 00:00:00'")
for row in cursor:
(id, firstname, lastname) = row
list.append(Contact(id=str(id), firstname=firstname, lastname=lastname))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close() | apache-2.0 |
taedla01/MissionPlanner | Lib/site-packages/numpy/core/numeric.py | 53 | 71241 | import sys
import warnings
__all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
'arange', 'array', 'zeros', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer',
'int_asbuffer', 'where', 'argwhere',
'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast',
'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
'isfortran', 'empty_like', 'zeros_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
'array2string', 'get_printoptions', 'set_printoptions',
'array_repr', 'array_str', 'set_string_function',
'little_endian', 'require',
'fromiter', 'array_equal', 'array_equiv',
'indices', 'fromfunction',
'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
'seterr', 'geterr', 'setbufsize', 'getbufsize',
'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
'Inf', 'inf', 'infty', 'Infinity',
'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
'ComplexWarning']
if sys.platform == 'cli':
__all__.remove('compare_chararrays')
for n in ['int_asbuffer', 'set_numeric_ops', 'can_cast', 'bitwise_not']:
#print "numeric.py: Temporarily filtering symbol '%s'" % n
__all__.remove(n)
import multiarray
import umath
from umath import *
# Strange hack. For some reason, possibly related to the circular dependencies
# between multiarray and numeric, if 'compare_chararrays' is left in __all__ the
# import fails. But it works if it's imported here.
if sys.platform == 'cli':
from multiarray import compare_chararrays
import numerictypes
from numerictypes import *
if sys.platform != 'cli' and sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
ndarray = multiarray.ndarray
flatiter = multiarray.flatiter
broadcast = multiarray.broadcast
dtype = multiarray.dtype
ufunc = type(sin)
bitwise_not = invert
CLIP = multiarray.CLIP
WRAP = multiarray.WRAP
RAISE = multiarray.RAISE
MAXDIMS = multiarray.MAXDIMS
ALLOW_THREADS = multiarray.ALLOW_THREADS
BUFSIZE = multiarray.BUFSIZE
# originally from Fernando Perez's IPython
def zeros_like(a):
"""
Return an array of zeros with the same shape and type as a given array.
Equivalent to ``a.copy().fill(0)``.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=np.float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
array([ 0., 0., 0.])
"""
if isinstance(a, ndarray):
res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
res.fill(0)
return res
try:
wrap = a.__array_wrap__
except AttributeError:
wrap = None
a = asarray(a)
res = zeros(a.shape, a.dtype)
if wrap:
res = wrap(res)
return res
def empty_like(a):
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
Returns
-------
out : ndarray
Array of random data with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
if isinstance(a, ndarray):
res = ndarray.__new__(type(a), a.shape, a.dtype, order=a.flags.fnc)
return res
try:
wrap = a.__array_wrap__
except AttributeError:
wrap = None
a = asarray(a)
res = empty(a.shape, a.dtype)
if wrap:
res = wrap(res)
return res
# end Fernando's utilities
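# extend_all (below) merges a submodule's public names into this module's
# __all__: it uses the submodule's own __all__ when present, otherwise every
# attribute whose name does not start with an underscore, skipping names that
# are already exported.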
def extend_all(module):
adict = {}
for a in __all__:
adict[a] = 1
try:
mall = getattr(module, '__all__')
except AttributeError:
mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
for a in mall:
if a not in adict:
__all__.append(a)
extend_all(umath)
extend_all(numerictypes)
newaxis = None
arange = multiarray.arange
array = multiarray.array
zeros = multiarray.zeros
empty = multiarray.empty
lexsort = multiarray.lexsort
putmask = multiarray.putmask
concatenate = multiarray.concatenate
where = multiarray.where
fastCopyAndTranspose = multiarray._fastCopyAndTranspose
fromstring = multiarray.fromstring
fromfile = multiarray.fromfile
fromiter = multiarray.fromiter
frombuffer = multiarray.frombuffer
if sys.platform != 'cli':
if sys.version_info[0] < 3:
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
int_asbuffer = multiarray.int_asbuffer
set_numeric_ops = multiarray.set_numeric_ops
can_cast = multiarray.can_cast
lexsort = multiarray.lexsort
compare_chararrays = multiarray.compare_chararrays
def asarray(a, dtype=None, order=None):
"""
Convert the input to an array.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F' for FORTRAN)
memory representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
See Also
--------
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asarray(a)
array([1, 2])
Existing arrays are not copied:
>>> a = np.array([1, 2])
>>> np.asarray(a) is a
True
If `dtype` is set, array is copied only if dtype does not match:
>>> a = np.array([1, 2], dtype=np.float32)
>>> np.asarray(a, dtype=np.float32) is a
True
>>> np.asarray(a, dtype=np.float64) is a
False
Contrary to `asanyarray`, ndarray subclasses are not passed through:
>>> issubclass(np.matrix, np.ndarray)
True
>>> a = np.matrix([[1, 2]])
>>> np.asarray(a) is a
False
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order)
def asanyarray(a, dtype=None, order=None):
"""
Convert the input to an ndarray, but pass ndarray subclasses through.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes scalars, lists, lists of tuples, tuples, tuples of tuples,
tuples of lists, and ndarrays.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('F') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray or an ndarray subclass
Array interpretation of `a`. If `a` is an ndarray or a subclass
of ndarray, it is returned as-is and no copy is performed.
See Also
--------
asarray : Similar function which always returns ndarrays.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
asarray_chkfinite : Similar function which checks input for NaNs and
Infs.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array:
>>> a = [1, 2]
>>> np.asanyarray(a)
array([1, 2])
Instances of `ndarray` subclasses are passed through as-is:
>>> a = np.matrix([1, 2])
>>> np.asanyarray(a) is a
True
"""
return array(a, dtype, copy=False, order=order, subok=True)
def ascontiguousarray(a, dtype=None):
"""
Return a contiguous array in memory (C order).
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
Data-type of returned array.
Returns
-------
out : ndarray
Contiguous array of same shape and content as `a`, with type `dtype`
if specified.
See Also
--------
asfortranarray : Convert input to an ndarray with column-major
memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> np.ascontiguousarray(x, dtype=np.float32)
array([[ 0., 1., 2.],
[ 3., 4., 5.]], dtype=float32)
>>> x.flags['C_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='C', ndmin=1)
def asfortranarray(a, dtype=None):
"""
Return an array laid out in Fortran order in memory.
Parameters
----------
a : array_like
Input array.
dtype : str or dtype object, optional
By default, the data-type is inferred from the input data.
Returns
-------
out : ndarray
The input `a` in Fortran, or column-major, order.
See Also
--------
ascontiguousarray : Convert input to a contiguous (C order) array.
asanyarray : Convert input to an ndarray with either row or
column-major memory order.
require : Return an ndarray that satisfies requirements.
ndarray.flags : Information about the memory layout of the array.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> y = np.asfortranarray(x)
>>> x.flags['F_CONTIGUOUS']
False
>>> y.flags['F_CONTIGUOUS']
True
"""
return array(a, dtype, copy=False, order='F', ndmin=1)
def require(a, dtype=None, requirements=None):
"""
Return an ndarray of the provided type that satisfies requirements.
This function is useful to be sure that an array with the correct flags
is returned for passing to compiled code (perhaps through ctypes).
Parameters
----------
a : array_like
The object to be converted to a type-and-requirement-satisfying array.
dtype : data-type
The required data-type, the default data-type is float64).
requirements : str or list of str
The requirements list can be any of the following
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
* 'ALIGNED' ('A') - ensure a data-type aligned array
* 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
See Also
--------
asarray : Convert input to an ndarray.
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfortranarray : Convert input to an ndarray with column-major
memory order.
ndarray.flags : Information about the memory layout of the array.
Notes
-----
The returned array will be guaranteed to have the listed requirements
by making a copy if needed.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : False
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
>>> y.flags
C_CONTIGUOUS : False
F_CONTIGUOUS : True
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
"""
if requirements is None:
requirements = []
else:
requirements = [x.upper() for x in requirements]
if not requirements:
return asanyarray(a, dtype=dtype)
if 'ENSUREARRAY' in requirements or 'E' in requirements:
subok = False
else:
subok = True
arr = array(a, dtype=dtype, copy=False, subok=subok)
copychar = 'A'
if 'FORTRAN' in requirements or \
'F_CONTIGUOUS' in requirements or \
'F' in requirements:
copychar = 'F'
elif 'CONTIGUOUS' in requirements or \
'C_CONTIGUOUS' in requirements or \
'C' in requirements:
copychar = 'C'
for prop in requirements:
if not arr.flags[prop]:
arr = arr.copy(copychar)
break
return arr
def isfortran(a):
"""
Returns True if array is arranged in Fortran-order in memory
and dimension > 1.
Parameters
----------
a : ndarray
Input array.
Examples
--------
    np.array allows one to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
1-D arrays always evaluate as False.
>>> np.isfortran(np.array([1, 2], order='FORTRAN'))
False
"""
return a.flags.fnc
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : ndarray
Indices of elements that are non-zero. Indices are grouped by element.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``where(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
return transpose(asanyarray(a).nonzero())
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to a.ravel().nonzero()[0].
Parameters
----------
a : ndarray
Input array.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return a.ravel().nonzero()[0]
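# Map the first letter of a correlation/convolution mode name ('valid',
# 'same', 'full') to the integer code expected by multiarray.correlate.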
_mode_from_name_dict = {'v': 0,
's' : 1,
'f' : 2}
def _mode_from_name(mode):
if isinstance(mode, type("")):
return _mode_from_name_dict[mode.lower()[0]]
return mode
def correlate(a, v, mode='valid', old_behavior=False):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
z[k] = sum_n a[n] * conj(v[n+k])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is `valid`, unlike `convolve`, which uses `full`.
old_behavior : bool
If True, uses the old behavior from Numeric, (correlate(a,v) == correlate(v,
a), and the conjugate is not taken for complex arrays). If False, uses
the conventional signal processing definition (see note).
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([ 3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([ 2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([ 0.5, 2. , 3.5, 3. , 0. ])
"""
mode = _mode_from_name(mode)
# the old behavior should be made available under a different name, see thread
# http://thread.gmane.org/gmane.comp.python.numeric.general/12609/focus=12630
if old_behavior:
warnings.warn("""
The old behavior of correlate was deprecated for 1.4.0, and will be completely removed
for NumPy 2.0.
The new behavior fits the conventional definition of correlation: inputs are
never swapped, and the second argument is conjugated for complex arrays.""",
DeprecationWarning)
return multiarray.correlate(a,v,mode)
else:
return multiarray.correlate2(a,v,mode)
def convolve(a,v,mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode `same` returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode `valid` returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
Notes
-----
The discrete convolution operation is defined as
.. math:: (f * g)[n] = \\sum_{m = -\\infty}^{\\infty} f[m] g[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([ 0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([ 1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([ 2.5])
"""
a,v = array(a, ndmin=1),array(v, ndmin=1)
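    # multiarray.correlate expects the longer sequence first; convolution is
    # commutative, so swapping the operands does not change the result.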
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0 :
raise ValueError('a cannot be empty')
if len(v) == 0 :
raise ValueError('v cannot be empty')
mode = _mode_from_name(mode)
return multiarray.correlate(a, v[::-1], mode)
def outer(a,b):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a, b : array_like, shape (M,), (N,)
First and second input vectors. Inputs are flattened if they
are not already 1-dimensional.
Returns
-------
out : ndarray, shape (M, N)
``out[i, j] = a[i] * b[j]``
References
----------
.. [1] : G. H. Golub and C. F. van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([[a, aa, aaa],
[b, bb, bbb],
[c, cc, ccc]], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return a.ravel()[:,newaxis]*b.ravel()[newaxis,:]
# try to import blas optimized dot if available
try:
# importing this changes the dot function for basic 4 types
# to blas-optimized versions.
from _dotblas import dot, vdot, inner, alterdot, restoredot
except ImportError:
# docstrings are in add_newdocs.py
inner = multiarray.inner
dot = multiarray.dot
def vdot(a, b):
return dot(asarray(a).ravel().conj(), asarray(b).ravel())
def alterdot():
pass
def restoredot():
pass
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
``a`` and ``b``, and an array_like object containing two array_like
objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed
over.
Parameters
----------
a, b : array_like, len(shape) >= 1
Tensors to "dot".
axes : variable type
* integer_like scalar
Number of axes to sum over (applies to both arrays); or
* array_like, shape = (2,), both elements array_like
Axes to be summed over, first sequence applying to ``a``, second
to ``b``.
See Also
--------
numpy.dot
Notes
-----
When there is more than one axis to sum over - and they are not the last
(first) axes of ``a`` (``b``) - the argument ``axes`` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]], dtype=bool)
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([[a, b],
[c, d]], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2
array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, 1)
array([[[acc, bdd],
[aaacccc, bbbdddd]],
[[aaaaacccccc, bbbbbdddddd],
[aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
>>> np.tensordot(a, A, 0) # "Left for reader" (result too long to incl.)
array([[[[[a, b],
[c, d]],
...
>>> np.tensordot(a, A, (0, 1))
array([[[abbbbb, cddddd],
[aabbbbbb, ccdddddd]],
[[aaabbbbbbb, cccddddddd],
[aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[[abb, cdd],
[aaabbbb, cccdddd]],
[[aaaaabbbbbb, cccccdddddd],
[aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
"""
try:
iter(axes)
except:
axes_a = range(-axes,0)
axes_b = range(0,axes)
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = len(a.shape)
bs = b.shape
ndb = len(b.shape)
equal = True
if (na != nb): equal = False
else:
for k in xrange(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError, "shape-mismatch for sum"
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (-1, N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, -1)
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
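    # With the contracted axes collapsed into a single dimension on each side,
    # the tensor contraction reduces to an ordinary 2-D matrix product; the
    # final reshape restores the free dimensions.  For example (sketch),
    # tensordot(a, b, axes=1) with a.shape == (3, 4) and b.shape == (4, 5)
    # becomes a (3, 4)-by-(4, 5) matrix product with result shape (3, 5).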
res = dot(at, bt)
return res.reshape(olda + oldb)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int
The number of places by which elements are shifted.
axis : int, optional
The axis along which elements are shifted. By default, the array
is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
"""
a = asanyarray(a)
if axis is None:
n = a.size
reshape = True
else:
n = a.shape[axis]
reshape = False
shift %= n
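    # Reducing the shift modulo the axis length makes shifts larger than the
    # array, as well as negative shifts, wrap around correctly.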
indexes = concatenate((arange(n-shift,n),arange(n-shift)))
res = a.take(indexes, axis)
if reshape:
return res.reshape(a.shape)
else:
return res
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start : int, optional
The axis is rolled until it lies before this position. The default,
0, results in a "complete" roll.
Returns
-------
res : ndarray
Output array.
See Also
--------
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
if axis < 0:
axis += n
if start < 0:
start += n
msg = 'rollaxis: %s (%d) must be >=0 and < %d'
if not (0 <= axis < n):
        raise ValueError(msg % ('axis', axis, n))
    if not (0 <= start < n+1):
        raise ValueError(msg % ('start', start, n+1))
if (axis < start): # it's been removed
start -= 1
if axis==start:
return a
axes = range(0,n)
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). By default, the
last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
-3
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa,axisb,axisc=(axis,)*3
a = asarray(a).swapaxes(axisa, 0)
b = asarray(b).swapaxes(axisb, 0)
msg = "incompatible dimensions for cross product\n"\
"(dimension must be 2 or 3)"
if (a.shape[0] not in [2,3]) or (b.shape[0] not in [2,3]):
raise ValueError(msg)
if a.shape[0] == 2:
if (b.shape[0] == 2):
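            # Both inputs have only two components, so just the z-component of
            # the cross product is defined; return it as a scalar (or an array
            # of scalars).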
cp = a[0]*b[1] - a[1]*b[0]
if cp.ndim == 0:
return cp
else:
return cp.swapaxes(0, axisc)
else:
x = a[1]*b[2]
y = -a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
elif a.shape[0] == 3:
if (b.shape[0] == 3):
x = a[1]*b[2] - a[2]*b[1]
y = a[2]*b[0] - a[0]*b[2]
z = a[0]*b[1] - a[1]*b[0]
else:
x = -a[2]*b[1]
y = a[2]*b[0]
z = a[0]*b[1] - a[1]*b[0]
cp = array([x,y,z])
if cp.ndim == 1:
return cp
else:
return cp.swapaxes(0,axisc)
#Use numarray's printing function
from arrayprint import array2string, get_printoptions, set_printoptions
_typelessdata = [int_, float_, complex_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero, default is False. Very small
is defined by `precision`, if the precision is 8 then
numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([ 0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([ 0.000001, 0. , 2. , 3. ])'
"""
if arr.size > 0 or arr.shape==(0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', "array(")
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
typeless = arr.dtype.type in _typelessdata
if arr.__class__ is not ndarray:
cName= arr.__class__.__name__
else:
cName = "array"
if typeless and arr.size:
return cName + "(%s)" % lst
else:
typename=arr.dtype.name
lf = ''
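        # For flexible dtypes (strings, structured types) the dtype description
        # can be long, so `lf` below moves it onto its own indented line.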
if issubclass(arr.dtype.type, flexible):
if arr.dtype.names:
typename = "%s" % str(arr.dtype)
else:
typename = "'%s'" % str(arr.dtype)
lf = '\n'+' '*len("array(")
return cName + "(%s, %sdtype=%s)" % (lst, lf, typename)
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
The data in the array is returned as a single string. This function is
similar to `array_repr`, the difference being that `array_repr` also
returns information on the kind of array and its data type.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`. The
default is, indirectly, 75.
precision : int, optional
Floating point precision. Default is the current printing precision
(usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
See Also
--------
array2string, array_repr, set_printoptions
Examples
--------
>>> np.array_str(np.arange(3))
'[0 1 2]'
"""
return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
Parameters
----------
f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
the array. If None, the function is reset to the default NumPy function
to print arrays.
repr : bool, optional
If True (default), the function for pretty printing (``__repr__``)
is set, if False the function that returns the default string
representation (``__str__``) is set.
See Also
--------
set_printoptions, get_printoptions
Examples
--------
>>> def pprint(arr):
... return 'HA! - What are you going to do now?'
...
>>> np.set_string_function(pprint)
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
>>> print a
[0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
>>> np.set_string_function(None)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
`repr` affects either pretty printing or normal string representation.
Note that ``__repr__`` is still affected by setting ``__str__``
because the width of each array element in the returned string becomes
equal to the length of the result of ``__str__()``.
>>> x = np.arange(4)
>>> np.set_string_function(lambda x:'random', repr=False)
>>> x.__str__()
'random'
>>> x.__repr__()
'array([ 0, 1, 2, 3])'
"""
if f is None:
if repr:
return multiarray.set_string_function(array_repr, 1)
else:
return multiarray.set_string_function(array_str, 0)
else:
return multiarray.set_string_function(f, repr)
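# Install the defaults: array_str for str() and array_repr for repr().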
set_string_function(array_str, 0)
set_string_function(array_repr, 1)
little_endian = (sys.byteorder == 'little')
def indices(dimensions, dtype=int):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
See Also
--------
mgrid, meshgrid
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
dimensions = tuple(dimensions)
N = len(dimensions)
if N == 0:
return array([],dtype=dtype)
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
tmp = arange(dim,dtype=dtype)
tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1)
newdim = dimensions[:i] + (1,)+ dimensions[i+1:]
val = zeros(newdim, dtype)
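        # Adding the index ramp `tmp` to a zero array of the complementary
        # shape broadcasts the values 0..dim-1 along axis i into res[i].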
add(tmp, val, res[i])
return res
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, each of which
represents the coordinates of the array varying along a
specific axis. For example, if `shape` were ``(2, 2)``, then
the parameters would be two arrays, ``[[0, 0], [1, 1]]`` and
``[[0, 1], [0, 1]]``. `function` must be capable of operating on
arrays, and should return a scalar value.
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
Returns
-------
out : any
The result of the call to `function` is passed back directly.
Therefore the type and shape of `out` is completely determined by
`function`.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `shape` and `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]], dtype=bool)
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
return function(*args,**kwargs)
def isscalar(num):
"""
Returns True if the type of `num` is a scalar type.
Parameters
----------
num : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `num` is a scalar type, False if it is not.
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
"""
if isinstance(num, generic):
return True
else:
return type(num) in ScalarType
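# Hex-digit to 4-bit-string lookup table used by binary_repr below; the
# trailing 'L' that hex() appends to Python 2 longs maps to the empty string.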
_lkup = {
'0':'0000',
'1':'0001',
'2':'0010',
'3':'0011',
'4':'0100',
'5':'0101',
'6':'0110',
'7':'0111',
'8':'1000',
'9':'1001',
'a':'1010',
'b':'1011',
'c':'1100',
'd':'1101',
'e':'1110',
'f':'1111',
'A':'1010',
'B':'1011',
'C':'1100',
'D':'1101',
'E':'1110',
'F':'1111',
'L':''}
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
    representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, the length of
the two's complement if `num` is negative.
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=4)
'1101'
"""
sign = ''
if num < 0:
if width is None:
sign = '-'
num = -num
else:
# replace num with its 2-complement
num = 2**width + num
elif num == 0:
return '0'*(width or 1)
ostr = hex(num)
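    # hex() yields a string like '0x1a' (possibly ending in 'L'); translate
    # each hex digit after the '0x' prefix into its 4-bit binary form,
    # e.g. hex(10) == '0xa' -> '1010'.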
bin = ''.join([_lkup[ch] for ch in ostr[2:]])
bin = bin.lstrip('0')
if width is not None:
bin = bin.zfill(width)
return sign + bin
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
        The value to convert. Negative values are represented with a leading
        minus sign.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
from cPickle import load, loads
_cload = load
_file = open
def load(file):
"""
Wrapper around cPickle.load which accepts either a file-like object or
a filename.
Note that the NumPy binary format is not based on pickle/cPickle anymore.
For details on the preferred way of loading and saving files, see `load`
and `save`.
See Also
--------
load, save
"""
if isinstance(file, type("")):
file = _file(file,"rb")
return _cload(file)
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0],val) for name in dt.names]
return tuple(res)
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
Please refer to the documentation for `zeros` for further details.
See Also
--------
zeros, ones_like
Examples
--------
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=np.int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[ 1.],
[ 1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[ 1., 1.],
[ 1., 1.]])
"""
a = empty(shape, dtype, order)
try:
a.fill(1)
# Above is faster now after addition of fast loops.
#a = zeros(shape, dtype, order)
#a+=1
except TypeError:
obj = _maketup(dtype, 1)
a.fill(obj)
return a
def identity(n, dtype=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
a = zeros((n,n), dtype=dtype)
a.flat[::n+1] = 1
return a
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
Returns
-------
y : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise. If either array contains NaN, then
False is returned.
See Also
--------
all, any, alltrue, sometrue
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`allclose(a, b)` might be different from `allclose(b, a)` in
some rare cases.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
"""
x = array(a, copy=False)
y = array(b, copy=False)
xinf = isinf(x)
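    # Infinities must match exactly (same positions and signs); only the
    # remaining finite entries are compared against the atol/rtol tolerance.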
if not all(xinf == isinf(y)):
return False
if not any(xinf):
return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
if not all(x[xinf] == y[xinf]):
return False
x = x[~xinf]
y = y[~xinf]
return all(less_equal(absolute(x-y), atol + rtol * absolute(y)))
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(logical_and.reduce(equal(a1,a2).ravel()))
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except:
return False
try:
return bool(logical_and.reduce(equal(a1,a2).ravel()))
except ValueError:
return False
_errdict = {"ignore":ERR_IGNORE,
"warn":ERR_WARN,
"raise":ERR_RAISE,
"call":ERR_CALL,
"print":ERR_PRINT,
"log":ERR_LOG}
_errdict_rev = {}
for key in _errdict.keys():
_errdict_rev[_errdict[key]] = key
del key
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] http://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.seterr(all='ignore') # reset to default
{'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
>>> np.int16(32000) * np.int16(3)
Warning: overflow encountered in short_scalars
30464
"""
pyvals = umath.geterrobj()
old = geterr()
if divide is None: divide = all or old['divide']
if over is None: over = all or old['over']
if under is None: under = all or old['under']
if invalid is None: invalid = all or old['invalid']
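    # Pack the four per-category error modes into a single integer, three bits
    # per category at its SHIFT_* offset, and store it in the umath error
    # object; geterr() below unpacks the same mask.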
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW ) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
def geterr():
"""
Get the current way of handling floating-point errors.
Returns
-------
res : dict
A dictionary with keys "divide", "over", "under", and "invalid",
whose values are from the strings "ignore", "print", "log", "warn",
"raise", and "call". The keys represent possible floating-point
exceptions, and the values define how these exceptions are handled.
See Also
--------
geterrcall, seterr, seterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterr() # default is all set to 'ignore'
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
>>> np.arange(3.) / np.arange(3.)
array([ NaN, 1., 1.])
>>> oldsettings = np.seterr(all='warn', over='raise')
>>> np.geterr()
{'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
>>> np.arange(3.) / np.arange(3.)
__main__:1: RuntimeWarning: invalid value encountered in divide
array([ NaN, 1., 1.])
"""
maskvalue = umath.geterrobj()[1]
mask = 7
res = {}
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
res['divide'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_OVERFLOW) & mask
res['over'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
res['under'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_INVALID) & mask
res['invalid'] = _errdict_rev[val]
return res
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
Parameters
----------
size : int
Size of buffer.
"""
if size > 10e6:
raise ValueError, "Buffer size, %s, is too big." % size
if size < 5:
raise ValueError, "Buffer size, %s, is too small." %size
if size % 16 != 0:
raise ValueError, "Buffer size, %s, is not a multiple of 16." %size
pyvals = umath.geterrobj()
old = getbufsize()
pyvals[0] = size
umath.seterrobj(pyvals)
return old
def getbufsize():
"""Return the size of the buffer used in ufuncs.
"""
return umath.geterrobj()[0]
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
There are two ways to capture floating-point error messages. The first
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
The call function takes two arguments. The first is the
type of error (one of "divide", "over", "under", or "invalid"),
and the second is the status flag. The flag is a byte, whose
least-significant bits indicate the status::
            [0 0 0 0 divide over under invalid]
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
If an object is provided, its write method should take one argument,
a string.
Returns
-------
h : callable, log instance or None
The old error handler.
See Also
--------
seterr, geterr, geterrcall
Examples
--------
Callback upon error:
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> saved_handler = np.seterrcall(err_handler)
>>> save_err = np.seterr(all='call')
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<function err_handler at 0x...>
>>> np.seterr(**save_err)
{'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'}
Log error message:
>>> class Log(object):
... def write(self, msg):
... print "LOG: %s" % msg
...
>>> log = Log()
>>> saved_handler = np.seterrcall(log)
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
LOG: Warning: divide by zero encountered in divide
<BLANKLINE>
array([ Inf, Inf, Inf])
>>> np.seterrcall(saved_handler)
<__main__.Log object at 0x...>
>>> np.seterr(**save_err)
{'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
"""
if func is not None and not callable(func):
if not hasattr(func, 'write') or not callable(func.write):
raise ValueError, "Only callable can be used as callback"
pyvals = umath.geterrobj()
old = geterrcall()
pyvals[2] = func
umath.seterrobj(pyvals)
return old
def geterrcall():
"""
Return the current callback function used on floating-point errors.
When the error handling for a floating-point error (one of "divide",
"over", "under", or "invalid") is set to 'call' or 'log', the function
that is called or the log instance that is written to is returned by
`geterrcall`. This function or log instance has been set with
`seterrcall`.
Returns
-------
errobj : callable, log instance or None
The current error handler. If no handler was set through `seterrcall`,
``None`` is returned.
See Also
--------
seterrcall, seterr, geterr
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrcall() # we did not yet set a handler, returns None
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([ Inf, Inf, Inf])
>>> cur_handler = np.geterrcall()
>>> cur_handler is err_handler
True
"""
return umath.geterrobj()[2]
class _unspecified(object):
pass
_Unspecified = _unspecified()
class errstate(object):
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
The ``with`` statement was introduced in Python 2.5, and can only be used
there by importing it: ``from __future__ import with_statement``. In
earlier Python versions the ``with`` statement is not available.
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> from __future__ import with_statement # use 'with' in Python 2.5
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([ NaN, Inf, Inf])
>>> with np.errstate(divide='warn'):
... np.arange(3) / 0.
...
__main__:2: RuntimeWarning: divide by zero encountered in divide
array([ NaN, Inf, Inf])
>>> np.sqrt(-1)
nan
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
"""
# Note that we don't want to run the above doctests because they will fail
# without a from __future__ import with_statement
def __init__(self, **kwargs):
self.call = kwargs.pop('call',_Unspecified)
self.kwargs = kwargs
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT2, None]
umath.seterrobj(defval)
# set the default values
_setdef()
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
import fromnumeric
from fromnumeric import *
extend_all(fromnumeric)
| gpl-3.0 |
jmlong1027/multiscanner | storage/file.py | 1 | 2965 | import codecs
import gzip
import json
import storage
class File(storage.Storage):
DEFAULTCONF = {
'ENABLED': True,
'path': 'report.json',
'gzip': False,
'files-per-line': 1
}
def setup(self):
if self.config['gzip'] is True:
self.file_handle = gzip.open(self.config['path'], 'a')
else:
self.file_handle = codecs.open(self.config['path'], 'ab', 'utf-8')
return True
def store(self, results):
if self.config['files-per-line'] and self.config['files-per-line'] > 0:
writedata = {}
metadata = None
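            # When results arrive wrapped as {'Files': ..., 'Metadata': ...},
            # unwrap them here and re-attach the metadata to every batch
            # written below.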
            if set(results.keys()) == set(['Files', 'Metadata']):
metadata = results['Metadata']
results = results['Files']
i = 0
for filename in results:
writedata[filename] = results[filename]
i += 1
if i >= self.config['files-per-line']:
if metadata:
writedata = {'Files': writedata, 'Metadata': metadata}
if self.config['gzip'] is True:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False).encode('utf8', 'replace'))
self.file_handle.write(b'\n')
else:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False))
self.file_handle.write('\n')
i = 0
writedata = {}
if writedata:
if metadata:
writedata = {'Files': writedata, 'Metadata': metadata}
if self.config['gzip'] is True:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False).encode('utf8', 'replace'))
self.file_handle.write(b'\n')
else:
self.file_handle.write(
json.dumps(writedata, sort_keys=True, separators=(',', ':'),
ensure_ascii=False))
self.file_handle.write('\n')
else:
if self.config['gzip'] is True:
self.file_handle.write(
json.dumps(results, sort_keys=True, separators=(',', ':'),
ensure_ascii=False).encode('utf8', 'replace'))
self.file_handle.write(b'\n')
else:
self.file_handle.write(
json.dumps(results, sort_keys=True, separators=(',', ':'),
ensure_ascii=False))
self.file_handle.write('\n')
def teardown(self):
self.file_handle.close()
| mpl-2.0 |
kdeldycke/meta-package-manager | meta_package_manager/tests/test_cli_cleanup.py | 1 | 1262 | # Copyright Kevin Deldycke <[email protected]> and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=redefined-outer-name
import pytest
from .conftest import MANAGER_IDS
from .test_cli import CLISubCommandTests
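# Shared fixture: tells the CLISubCommandTests suite which mpm sub-command
# ("cleanup") to exercise.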
@pytest.fixture
def subcmd():
return "cleanup"
class TestCleanup(CLISubCommandTests):
@pytest.mark.parametrize("mid", MANAGER_IDS)
def test_single_manager(self, invoke, subcmd, mid):
result = invoke("--manager", mid, subcmd)
assert result.exit_code == 0
self.check_manager_selection(result, {mid})
| gpl-2.0 |
thomas-schmid-ubnt/avocado | selftests/functional/test_replay_basic.py | 1 | 8628 | import glob
import os
import tempfile
import shutil
import unittest
from avocado.core import exit_codes
from avocado.utils import process
basedir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')
basedir = os.path.abspath(basedir)
AVOCADO = os.environ.get("UNITTEST_AVOCADO_CMD", "./scripts/avocado")
class ReplayTests(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
cmd_line = ('%s run passtest.py '
'-m examples/tests/sleeptest.py.data/sleeptest.yaml '
'--job-results-dir %s --sysinfo=off --json -'
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
self.jobdir = ''.join(glob.glob(os.path.join(self.tmpdir, 'job-*')))
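        # Read the generated job id back from the results directory so the
        # replay tests below can refer to this job by id.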
idfile = ''.join(os.path.join(self.jobdir, 'id'))
with open(idfile, 'r') as f:
self.jobid = f.read().strip('\n')
def run_and_check(self, cmd_line, expected_rc):
os.chdir(basedir)
result = process.run(cmd_line, ignore_status=True)
self.assertEqual(result.exit_status, expected_rc,
"Command %s did not return rc "
"%d:\n%s" % (cmd_line, expected_rc, result))
return result
def test_run_replay_noid(self):
"""
Runs a replay job with an invalid jobid.
"""
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, 'foo', self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_latest(self):
"""
Runs a replay job using the 'latest' keyword.
"""
cmd_line = ('%s run --replay latest --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_data(self):
"""
Checks if all expected files are there.
"""
file_list = ['multiplex', 'config', 'test_references', 'pwd', 'args',
'cmdline']
for filename in file_list:
path = os.path.join(self.jobdir, 'jobdata', filename)
self.assertTrue(glob.glob(path))
def test_run_replay(self):
"""
Runs a replay job.
"""
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_partialid(self):
"""
Runs a replay job with a partial jobid.
"""
partial_id = self.jobid[:5]
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, partial_id, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_results_as_jobid(self):
"""
Runs a replay job identifying the job by its results directory.
"""
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobdir, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_invalidignore(self):
"""
Runs a replay job with an invalid option for '--replay-ignore'
"""
        cmd_line = ('%s run --replay %s --replay-ignore foo '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-ignore option. Valid options are ' \
'(more than one allowed): variants,config'
self.assertIn(msg, result.stderr)
def test_run_replay_ignorevariants(self):
"""
Runs a replay job ignoring the variants.
"""
cmd_line = ('%s run --replay %s --replay-ignore variants '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Ignoring variants from source job with --replay-ignore.'
self.assertIn(msg, result.stderr)
def test_run_replay_invalidstatus(self):
"""
Runs a replay job with an invalid option for '--replay-test-status'
"""
cmd_line = ('%s run --replay %s --replay-test-status E '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = 'Invalid --replay-test-status option. Valid options are (more ' \
'than one allowed): SKIP,ERROR,FAIL,WARN,PASS,INTERRUPTED'
self.assertIn(msg, result.stderr)
def test_run_replay_statusfail(self):
"""
Runs a replay job only with tests that failed.
"""
cmd_line = ('%s run --replay %s --replay-test-status '
'FAIL --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
result = self.run_and_check(cmd_line, expected_rc)
msg = 'RESULTS : PASS 0 | ERROR 0 | FAIL 0 | SKIP 4 | WARN 0 | INTERRUPT 0'
self.assertIn(msg, result.stdout)
def test_run_replay_remotefail(self):
"""
Runs a replay job using remote plugin (not supported).
"""
cmd_line = ('%s run --replay %s --remote-hostname '
'localhost --job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = "Currently we don't replay jobs in remote hosts."
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_variants(self):
"""
Runs a replay job with custom variants using '--replay-test-status'
"""
cmd_line = ('%s run --replay %s --replay-ignore variants '
'--replay-test-status FAIL --job-results-dir %s '
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option `--replay-test-status` is incompatible with "
"`--replay-ignore variants`")
self.assertIn(msg, result.stderr)
def test_run_replay_status_and_references(self):
"""
Runs a replay job with custom test references and --replay-test-status
"""
cmd_line = ('%s run sleeptest --replay %s '
'--replay-test-status FAIL --job-results-dir %s '
'--sysinfo=off' % (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_FAIL
result = self.run_and_check(cmd_line, expected_rc)
msg = ("Option --replay-test-status is incompatible with "
"test references given on the command line.")
self.assertIn(msg, result.stderr)
def test_run_replay_fallbackdir(self):
"""
Runs a replay job with the fallback job data directory name.
"""
shutil.move(os.path.join(self.jobdir, 'jobdata'),
os.path.join(self.jobdir, 'replay'))
cmd_line = ('%s run --replay %s '
'--job-results-dir %s --sysinfo=off'
% (AVOCADO, self.jobid, self.tmpdir))
expected_rc = exit_codes.AVOCADO_ALL_OK
self.run_and_check(cmd_line, expected_rc)
def test_run_replay_and_mux(self):
"""
Runs a replay job and specifies multiplex file (which should be
ignored)
"""
cmdline = ("%s run --replay %s --job-results-dir %s "
"--sysinfo=off -m examples/mux-selftest.yaml"
% (AVOCADO, self.jobid, self.tmpdir))
self.run_and_check(cmdline, exit_codes.AVOCADO_ALL_OK)
def tearDown(self):
shutil.rmtree(self.tmpdir)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
zhangyongfei/StudySkia | gm/rebaseline_server/compare_rendered_pictures_test.py | 1 | 3997 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test compare_rendered_pictures.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self._output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self._output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
import os
import subprocess
import sys
# Imports from within Skia
import base_unittest
import compare_rendered_pictures
import results
import gm_json # must import results first, so that gm_json will be in sys.path
class CompareRenderedPicturesTest(base_unittest.TestCase):
def test_endToEnd(self):
"""Generate two sets of SKPs, run render_pictures over both, and compare
the results."""
self._generate_skps_and_run_render_pictures(
subdir='before_patch', skpdict={
'changed.skp': 200,
'unchanged.skp': 100,
'only-in-before.skp': 128,
})
self._generate_skps_and_run_render_pictures(
subdir='after_patch', skpdict={
'changed.skp': 201,
'unchanged.skp': 100,
'only-in-after.skp': 128,
})
results_obj = compare_rendered_pictures.RenderedPicturesComparisons(
actuals_root=self._temp_dir,
subdirs=('before_patch', 'after_patch'),
generated_images_root=self._temp_dir,
diff_base_url='/static/generated-images')
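    # Patch in a fixed timestamp so the generated JSON is deterministic.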
results_obj.get_timestamp = mock_get_timestamp
gm_json.WriteToFile(
results_obj.get_packaged_results_of_type(
results.KEY__HEADER__RESULTS_ALL),
os.path.join(self._output_dir_actual, 'compare_rendered_pictures.json'))
def _generate_skps_and_run_render_pictures(self, subdir, skpdict):
"""Generate SKPs and run render_pictures on them.
Args:
subdir: subdirectory (within self._temp_dir) to write all files into
skpdict: {skpname: redvalue} dictionary describing the SKP files to render
"""
out_path = os.path.join(self._temp_dir, subdir)
os.makedirs(out_path)
for skpname, redvalue in skpdict.iteritems():
self._run_skpmaker(
output_path=os.path.join(out_path, skpname), red=redvalue)
# TODO(epoger): Add --mode tile 256 256 --writeWholeImage to the unittest,
# and fix its result! (imageURLs within whole-image entries are wrong when
# I tried adding that)
binary = self.find_path_to_program('render_pictures')
return subprocess.check_output([
binary,
'--config', '8888',
'-r', out_path,
'--writeChecksumBasedFilenames',
'--writeJsonSummaryPath', os.path.join(out_path, 'summary.json'),
'--writePath', out_path])
def _run_skpmaker(self, output_path, red=0, green=0, blue=0,
width=640, height=400):
"""Runs the skpmaker binary to generate SKP with known characteristics.
Args:
output_path: Filepath to write the SKP into.
red: Value of red color channel in image, 0-255.
green: Value of green color channel in image, 0-255.
blue: Value of blue color channel in image, 0-255.
width: Width of canvas to create.
height: Height of canvas to create.
"""
binary = self.find_path_to_program('skpmaker')
return subprocess.check_output([
binary,
'--red', str(red),
'--green', str(green),
'--blue', str(blue),
'--width', str(width),
'--height', str(height),
'--writePath', str(output_path)])
def mock_get_timestamp():
"""Mock version of BaseComparisons.get_timestamp() for testing."""
return 12345678
def main():
base_unittest.main(CompareRenderedPicturesTest)
if __name__ == '__main__':
main()
| bsd-3-clause |
entropy1337/infernal-twin | Modules/build/pillow/PIL/XpmImagePlugin.py | 52 | 3089 | #
# The Python Imaging Library.
# $Id$
#
# XPM File handling
#
# History:
# 1996-12-29 fl Created
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
#
# Copyright (c) Secret Labs AB 1997-2001.
# Copyright (c) Fredrik Lundh 1996-2001.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
import re
from PIL import Image, ImageFile, ImagePalette
from PIL._binary import i8, o8
# XPM header
xpm_head = re.compile(b"\"([0-9]*) ([0-9]*) ([0-9]*) ([0-9]*)")
def _accept(prefix):
return prefix[:9] == b"/* XPM */"
##
# Image plugin for X11 pixel maps.
class XpmImageFile(ImageFile.ImageFile):
format = "XPM"
format_description = "X11 Pixel Map"
def _open(self):
if not _accept(self.fp.read(9)):
raise SyntaxError("not an XPM file")
# skip forward to next string
while True:
s = self.fp.readline()
if not s:
raise SyntaxError("broken XPM file")
m = xpm_head.match(s)
if m:
break
self.size = int(m.group(1)), int(m.group(2))
pal = int(m.group(3))
bpp = int(m.group(4))
if pal > 256 or bpp != 1:
raise ValueError("cannot read this XPM file")
#
# load palette description
palette = [b"\0\0\0"] * 256
for i in range(pal):
s = self.fp.readline()
if s[-2:] == b'\r\n':
s = s[:-2]
elif s[-1:] in b'\r\n':
s = s[:-1]
c = i8(s[1])
s = s[2:-2].split()
for i in range(0, len(s), 2):
if s[i] == b"c":
# process colour key
rgb = s[i+1]
if rgb == b"None":
self.info["transparency"] = c
elif rgb[0:1] == b"#":
# FIXME: handle colour names (see ImagePalette.py)
rgb = int(rgb[1:], 16)
palette[c] = (o8((rgb >> 16) & 255) +
o8((rgb >> 8) & 255) +
o8(rgb & 255))
else:
# unknown colour
raise ValueError("cannot read this XPM file")
break
else:
# missing colour key
raise ValueError("cannot read this XPM file")
self.mode = "P"
self.palette = ImagePalette.raw("RGB", b"".join(palette))
self.tile = [("raw", (0, 0)+self.size, self.fp.tell(), ("P", 0, 1))]
def load_read(self, bytes):
#
# load all image data in one chunk
xsize, ysize = self.size
s = [None] * ysize
for i in range(ysize):
s[i] = self.fp.readline()[1:xsize+1].ljust(xsize)
self.fp = None
return b"".join(s)
#
# Registry
Image.register_open("XPM", XpmImageFile, _accept)
Image.register_extension("XPM", ".xpm")
Image.register_mime("XPM", "image/xpm")
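# Illustrative usage sketch (the file name is hypothetical): once registered as
# above, the generic PIL loader handles XPM files directly, e.g.
#   from PIL import Image
#   im = Image.open("icon.xpm")   # yields a mode "P" (palette) image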
| gpl-3.0 |
proticom/google-python-exercises | copyspecial/solution/copyspecial.py | 206 | 2584 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
# LAB(begin solution)
def get_special_paths(dirname):
"""Given a dirname, returns a list of all its special files."""
result = []
paths = os.listdir(dirname) # list of paths in that dir
for fname in paths:
match = re.search(r'__(\w+)__', fname)
if match:
result.append(os.path.abspath(os.path.join(dirname, fname)))
return result
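# Illustrative behaviour (file names are hypothetical): in a directory holding
# 'xyz__hello__.txt' and 'plain.txt', only the first matches r'__(\w+)__', so
# get_special_paths(dirname) returns its absolute path and skips the other file.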
def copy_to(paths, to_dir):
"""Copy all of the given files to the given dir, creating it if necessary."""
if not os.path.exists(to_dir):
os.mkdir(to_dir)
for path in paths:
fname = os.path.basename(path)
shutil.copy(path, os.path.join(to_dir, fname))
# could error out if already exists os.path.exists():
def zip_to(paths, zipfile):
"""Zip up all of the given files into a new zip file with the given name."""
cmd = 'zip -j ' + zipfile + ' ' + ' '.join(paths)
print "Command I'm going to do:" + cmd
(status, output) = commands.getstatusoutput(cmd)
# If command had a problem (status is non-zero),
# print its output to stderr and exit.
if status:
sys.stderr.write(output)
sys.exit(1)
# LAB(end solution)
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
# +++your code here+++
# Call your functions
# LAB(begin solution)
# Gather all the special files
paths = []
for dirname in args:
paths.extend(get_special_paths(dirname))
if todir:
copy_to(paths, todir)
elif tozip:
zip_to(paths, tozip)
else:
print '\n'.join(paths)
# LAB(end solution)
if __name__ == "__main__":
main()
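# Illustrative invocations (paths are hypothetical):
#   python copyspecial.py .                        # print the special file paths
#   python copyspecial.py --todir /tmp/special .   # copy them into /tmp/special
#   python copyspecial.py --tozip special.zip .    # zip them into special.zip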
| apache-2.0 |
varunnaganathan/django | django/conf/locale/it/formats.py | 504 | 2079 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i' # 14:30
DATETIME_FORMAT = 'l d F Y H:i' # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F'  # 25/Ottobre
SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i' # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25'
'%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
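# Illustrative effect of the separators above (assuming Django's localized
# number formatting, e.g. USE_THOUSAND_SEPARATOR, is enabled):
#   1234567.89 would be rendered as '1.234.567,89'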
| bsd-3-clause |
glyph/imaginary | imaginary/test/test_look.py | 1 | 8911 | """
Tests for L{imaginary.action.LookAt} and L{imaginary.action.LookAround}.
"""
from __future__ import print_function
from textwrap import dedent
from twisted.trial.unittest import TestCase
from zope.interface import implementer
from characteristic import attributes as has_attributes
from axiom import store, item, attributes
from imaginary import iimaginary, objects, language, action, events
from imaginary.enhancement import Enhancement
from imaginary.world import ImaginaryWorld
from imaginary.test.commandutils import (
CommandTestCaseMixin, E, createLocation, flatten)
class TestIntelligence(object):
def __init__(self):
self.observedConcepts = []
def prepare(self, concept):
return lambda: self.observedConcepts.append(concept)
class LookContext(object):
def __init__(self):
self.store = store.Store()
locContainer = createLocation(
self.store, name=u"Test Location",
description=u"Location for testing.")
self.location = locContainer.thing
self.world = ImaginaryWorld(store=self.store)
self.player = self.world.create(u"Test Player", gender=language.Gender.FEMALE)
locContainer.add(self.player)
self.actor = iimaginary.IActor(self.player)
self.actor.setEphemeralIntelligence(TestIntelligence())
class LookAroundTranscriptTests(CommandTestCaseMixin, TestCase):
"""
Transcript-style tests for I{look}.
"""
def test_emptyLocation(self):
iimaginary.IContainer(self.location).remove(self.observer)
self._test(
u"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
])
def test_siblingObject(self):
self._test(
"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
u"Here, you see Observer Player."])
def test_cousinObject(self):
o = objects.Thing(store=self.store, name=u"foo")
iimaginary.IContainer(self.observer).add(o)
self._test(
"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
u"Here, you see Observer Player."])
def test_childObject(self):
o = objects.Thing(store=self.store, name=u"foo")
self.playerContainer.add(o)
self._test(
"look",
[E(u"[ Test Location ]"),
u"Location for testing.",
u"Here, you see Observer Player."])
def test_equipment(self):
self.observer.moveTo(None)
self._test(u"create a shirt named t-shirt", [u"You create a t-shirt."])
self._test(u"wear t-shirt", [u"You put on the t-shirt."])
self._test(
u"look",
[E(u"[ Test Location ]"),
E(u"Location for testing.")])
@implementer(iimaginary.ILitLink)
@has_attributes(["bear"])
class BearsHiddenBeyondThisLink(object):
"""
An annotation for a link implementing L{BearBlindness}.
"""
def isItLit(self, path):
"""
Any path that passes through a L{BearsHiddenBeyondThisLink} link and
terminates in a bear is shrouded in darkness. The bear lies in wait.
"""
schroedingerBear = path.targetAs(iimaginary.IThing)
actualBear = self.bear
if schroedingerBear == actualBear:
return False
else:
return True
def whyNotLit(self):
"""
The reason that a bear is obscured is L{BearsWhyNot}.
"""
return BearsWhyNot()
def applyLighting(self, litThing, it, interface):
"""
L{iimaginary.ILitLink.applyLighting} can modify a target that has had
lighting applied to it; in the case of this annotation things are
either completely not lit at all (bears) or fully lit and appear normal
(everything else) so we just always return the thing itself and don't
modify it.
"""
return it
class BearsWhyNot(object):
"""
A reason you can't see something: it's a bear, and you're blind to bears,
that's why you can't see it.
"""
def tellMeWhyNot(self):
"""
An evocative message that the user probably won't see (since they can't
in fact see this bear).
"""
return u"IT'S A BEAR"
interfaces = [iimaginary.ILinkAnnotator]
@implementer(*interfaces)
class BearBlindness(item.Item, Enhancement):
"""
An enhancement for an actor which causes that actor to become unable to see
bears.
(This could be installed on something other than an actor, which would
cause all bears on the other side of whatever link it was to become
invisible to all.)
"""
powerupInterfaces = interfaces
thing = attributes.reference(
"""
This is a reference to a Thing which is blind to bears.
"""
)
bear = attributes.reference(
"""
This is a reference to a Thing which is the one and only bear in the
universe, which you cannot see.
THERE CAN ONLY BE ONE.
"""
)
def annotationsFor(self, link, idea):
"""
Yield an annotation for all links which causes bears on the opposite
side of you to be invisible to you.
"""
yield BearsHiddenBeyondThisLink(bear=self.bear)
class LookAtTranscriptTests(CommandTestCaseMixin, TestCase):
def test_bearBlindness(self):
"""
If I cast a spell on you which makes you unable to see bears, you
should not see a bear in the room with you when you look at the room
around you.
"""
bear = objects.Thing(store=self.store,
name=u"Bear",
location=self.location)
BearBlindness(store=self.store,
thing=self.player,
bear=bear).applyEnhancement()
self._test(
"look here",
[E("[ Test Location ]"),
E("Location for testing."),
"Here, you see Observer Player."])
def test_exits(self):
objects.Exit.link(self.location, self.location, u"north")
self._test(
"look here",
[E("[ Test Location ]"),
E("( north south )"),
E("Location for testing."),
"Here, you see Observer Player."])
def test_lookMe(self):
self._test(
"look me",
[E("[ Test Player ]"),
"Test Player is great.",
"She is naked."])
def test_lookAtMe(self):
self._test(
"look at me",
[E("[ Test Player ]"),
"Test Player is great.",
"She is naked."])
def test_lookAtAnother(self):
self._test(
"look at Observer Player",
[E("[ Observer Player ]"),
"Observer Player is great.",
"She is naked."],
["Test Player looks at you."])
def test_lookAtThing(self):
o = objects.Thing(store=self.store, name=u"foo")
iimaginary.IContainer(self.location).add(o)
self._test(
"look at foo",
[E("[ foo ]")])
def test_lookAtMissing(self):
self._test(
"look at bar",
["You don't see that."])
class LookAroundTests(TestCase):
"""
Tests for L{imaginary.action.LookAround}.
"""
def setUp(self):
self.context = LookContext()
def test_eventBroadcasting(self):
"""
The L{LookAround} action broadcasts an L{events.Success} to the actor.
"""
action.LookAround().runEventTransaction(
self.context.player, u"look", {})
[event] = self.context.actor.getIntelligence().observedConcepts
self.assertIsInstance(event, events.Success)
class LookAtTests(TestCase):
"""
Tests for L{imaginary.action.LookAt}.
"""
def setUp(self):
self.context = LookContext()
def test_exitNameEventBroadcasting(self):
target = objects.Thing(
store=self.context.store,
name=u"Visible Location",
description=u"Description of visible location.",
proper=True)
objects.Container.createFor(target, capacity=1000)
objects.Exit.link(self.context.location, target, u"south")
action.LookAt().runEventTransaction(
self.context.player, u"look", {"target": u"south"})
evts = self.context.actor.getIntelligence().observedConcepts
self.assertEqual(1, len(evts))
self.assertIsInstance(evts[0], events.Success)
self.assertEqual(
dedent(u"""
[ Visible Location ]
( north )
Description of visible location.
""").lstrip(),
flatten(evts[0].actorMessage.plaintext(self.context.actor.thing)))
| mit |
alexanderturner/ansible | lib/ansible/modules/database/vertica/vertica_user.py | 19 | 14854 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: vertica_user
version_added: '2.0'
short_description: Adds or removes Vertica database users and assigns roles.
description:
- Adds or removes Vertica database user and, optionally, assigns roles.
- A user will not be removed until all the dependencies have been dropped.
- In such a situation, if the module tries to remove the user it
will fail and only remove roles granted to the user.
options:
name:
description:
- Name of the user to add or remove.
required: true
profile:
description:
- Sets the user's profile.
required: false
default: null
resource_pool:
description:
- Sets the user's resource pool.
required: false
default: null
password:
description:
- The user's password encrypted by the MD5 algorithm.
- The password must be generated with the format C("md5" + md5[password + username]),
resulting in a total of 35 characters. An easy way to do this is by querying
the Vertica database with select 'md5'||md5('<user_password><user_name>').
required: false
default: null
expired:
description:
- Sets the user's password expiration.
required: false
default: null
ldap:
description:
- Set to true if users are authenticated via LDAP.
- The user will be created with password expired and set to I($ldap$).
required: false
default: null
roles:
description:
- Comma separated list of roles to assign to the user.
aliases: ['role']
required: false
default: null
state:
description:
- Whether to create C(present), drop C(absent) or lock C(locked) a user.
required: false
choices: ['present', 'absent', 'locked']
default: present
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica user with password
vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
- name: creating a new vertica user authenticated via ldap with roles assigned
vertica_user:
name=user_name
ldap=true
db=db_name
roles=schema_name_ro
state=present
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
select u.user_name, u.is_locked, u.lock_time,
p.password, p.acctexpired as is_expired,
u.profile_name, u.resource_pool,
u.all_roles, u.default_roles
from users u join password_auditor p on p.user_id = u.user_id
where not u.is_super_user
and (? = '' or u.user_name ilike ?)
""", user, user)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
user_key = row.user_name.lower()
facts[user_key] = {
'name': row.user_name,
'locked': str(row.is_locked),
'password': row.password,
'expired': str(row.is_expired),
'profile': row.profile_name,
'resource_pool': row.resource_pool,
'roles': [],
'default_roles': []}
if row.is_locked:
facts[user_key]['locked_time'] = str(row.lock_time)
if row.all_roles:
facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
if row.default_roles:
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
def update_roles(user_facts, cursor, user,
existing_all, existing_default, required):
del_roles = list(set(existing_all) - set(required))
if del_roles:
cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
new_roles = list(set(required) - set(existing_all))
if new_roles:
cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
if required:
cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
def check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
return False
if profile and profile != user_facts[user_key]['profile']:
return False
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
return False
if locked != (user_facts[user_key]['locked'] == 'True'):
return False
if password and password != user_facts[user_key]['password']:
return False
if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \
ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'):
return False
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
return False
return True
def present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
query_fragments = ["create user {0}".format(user)]
if locked:
query_fragments.append("account lock")
if password or ldap:
if password:
query_fragments.append("identified by '{0}'".format(password))
else:
query_fragments.append("identified by '$ldap$'")
if expired or ldap:
query_fragments.append("password expire")
if profile:
query_fragments.append("profile {0}".format(profile))
if resource_pool:
query_fragments.append("resource pool {0}".format(resource_pool))
cursor.execute(' '.join(query_fragments))
if resource_pool and resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
update_roles(user_facts, cursor, user, [], [], roles)
user_facts.update(get_user_facts(cursor, user))
return True
else:
changed = False
query_fragments = ["alter user {0}".format(user)]
if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
if locked:
state = 'lock'
else:
state = 'unlock'
query_fragments.append("account {0}".format(state))
changed = True
if password and password != user_facts[user_key]['password']:
query_fragments.append("identified by '{0}'".format(password))
changed = True
if ldap:
if ldap != (user_facts[user_key]['expired'] == 'True'):
query_fragments.append("password expire")
changed = True
elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
if expired:
query_fragments.append("password expire")
changed = True
else:
raise NotSupportedError("Unexpiring user password is not supported.")
if profile and profile != user_facts[user_key]['profile']:
query_fragments.append("profile {0}".format(profile))
changed = True
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
query_fragments.append("resource pool {0}".format(resource_pool))
if user_facts[user_key]['resource_pool'] != 'general':
cursor.execute("revoke usage on resource pool {0} from {1}".format(
user_facts[user_key]['resource_pool'], user))
if resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
changed = True
if changed:
cursor.execute(' '.join(query_fragments))
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
changed = True
if changed:
user_facts.update(get_user_facts(cursor, user))
return changed
def absent(user_facts, cursor, user, roles):
user_key = user.lower()
if user_key in user_facts:
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
try:
cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
except pyodbc.Error:
raise CannotDropError("Dropping user failed due to dependencies.")
del user_facts[user_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True, aliases=['name']),
profile=dict(default=None),
resource_pool=dict(default=None),
password=dict(default=None, no_log=True),
expired=dict(type='bool', default=None),
ldap=dict(type='bool', default=None),
roles=dict(default=None, aliases=['role']),
state=dict(default='present', choices=['absent', 'present', 'locked']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None, no_log=True),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
user = module.params['user']
profile = module.params['profile']
if profile:
profile = profile.lower()
resource_pool = module.params['resource_pool']
if resource_pool:
resource_pool = resource_pool.lower()
password = module.params['password']
expired = module.params['expired']
ldap = module.params['ldap']
roles = []
if module.params['roles']:
roles = module.params['roles'].split(',')
roles = filter(None, roles)
state = module.params['state']
if state == 'locked':
locked = True
else:
locked = False
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception:
e = get_exception()
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
user_facts = get_user_facts(cursor)
if module.check_mode:
changed = not check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles)
elif state == 'absent':
try:
changed = absent(user_facts, cursor, user, roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
elif state in ['present', 'locked']:
try:
changed = present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
except NotSupportedError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except CannotDropError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception:
e = get_exception()
module.fail_json(msg=e)
module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
if __name__ == '__main__':
main()
| gpl-3.0 |
wengole/nasman | nasman/snapshots/utils/base.py | 1 | 2714 | import pathlib
import abc
import chardet
from django.utils.timezone import get_default_timezone_name, now
def decode_from_filesystem(path):
"""
Decode a given Path object to unicode by detecting its encoding
:param path: The Path object to decode
:type path: pathlib.Path
    :return: Tuple of the str representation of the path, and its original
    codec name
:rtype: tuple
"""
value = str(path)
b = bytes(path)
codec = chardet.detect(b)['encoding']
return value.encode('utf-8', 'surrogateescape').decode(codec), codec
def encode_to_filesystem(value, codec):
"""
Encode the given value using the given codec back Path object
:param value: Unicode represented path
:type value: str
:param codec: Originally detected codec
:type codec: str
:return: Path object
:rtype: pathlib.Path
"""
return pathlib.Path(value.encode(codec).decode('utf-8', 'surrogateescape'))
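# A minimal round-trip sketch (illustrative only; `some_path` is a hypothetical
# pathlib.Path whose raw bytes need not be valid UTF-8, and the detected codec
# is assumed to round-trip):
#   text, codec = decode_from_filesystem(some_path)
#   assert encode_to_filesystem(text, codec) == some_path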
class BaseFilesystem(metaclass=abc.ABCMeta):
def __init__(self, name, mountpoint=''):
self._name = name
self._mountpoint = mountpoint
@property
@abc.abstractmethod
def name(self):
"""
:return: The name of the filesystem
        :rtype: str
"""
@property
@abc.abstractmethod
def mountpoint(self):
"""
:return: The mountpoint of the filesystem
        :rtype: str
"""
class BaseSnapshot(metaclass=abc.ABCMeta):
def __init__(self, name, timestamp=None, filesystem=None):
self._name = name
self._filesystem = filesystem
if timestamp is not None:
self._timestamp = timestamp
else:
self._timestamp = now()
@property
@abc.abstractmethod
def name(self):
"""
:return: The name of the snapshot
        :rtype: str
"""
@property
@abc.abstractmethod
def timestamp(self):
"""
:return: The creation time of this snapshot
:rtype: `datetime.datetime`
"""
@property
@abc.abstractmethod
def filesystem(self):
"""
:return: The parent filesystem of this snapshot
:rtype: `BaseFilesystem`
"""
class BaseUtil(metaclass=abc.ABCMeta):
timezone_name = get_default_timezone_name()
@classmethod
@abc.abstractmethod
def get_filesystems(cls):
"""
Gets a list of filesystems
:return: A list of `BaseFilesystem` objects
:rtype: list
"""
@classmethod
@abc.abstractmethod
def get_snapshots(cls):
"""
Gets a list of snapshots
:return: A list of `BaseSnapshot`
:rtype: list
"""
| bsd-3-clause |
DoWhileGeek/slice | setup.py | 1 | 1915 | import contextlib
import json
import os
import re
import subprocess
from setuptools import setup
VERSION_FILE = os.path.join(os.path.dirname(__file__), "version.json")
def _get_git_description():
try:
return subprocess.check_output(["git", "describe"]).decode("utf-8").strip()
except subprocess.CalledProcessError:
return None
def _create_version_from_description(git_description):
match = re.match(r'(?P<tag>[\d\.]+)-(?P<offset>[\d]+)-(?P<sha>\w{8})', git_description)
if not match:
version = git_description
else:
version = "{tag}.post{offset}".format(**match.groupdict())
return version
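# Illustrative behaviour (the describe strings are hypothetical):
#   _create_version_from_description('0.6.0-10-g1234abc') -> '0.6.0.post10'
#   _create_version_from_description('0.6.0') -> '0.6.0' (no match, returned unchanged)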
def get_version():
with open(VERSION_FILE) as version_file:
return json.loads(version_file.read())["version"]
@contextlib.contextmanager
def write_version():
git_description = _get_git_description()
version = _create_version_from_description(git_description) if git_description else None
if version:
with open(VERSION_FILE, 'w') as version_file:
version_file.write(json.dumps({"version": version}))
yield
def main():
with write_version():
setup(
name="slice",
url="https://github.com/DoWhileGeek/slice",
description="Terminal application that enables slicing for 3d printing in the cloud.",
author="Joeseph Rodrigues",
author_email="[email protected]",
version=get_version(),
install_requires=[
"authentise-services==0.6.0",
"ConfigArgParse==0.9.3",
"appdirs==1.4.0",
],
extras_require={
'develop': [
'pytest==2.6.4',
'httpretty==0.8.10',
'twine==1.5.0',
]},
scripts=["bin/slice"]
)
if __name__ == "__main__":
main()
| mit |
nwjs/chromium.src | third_party/google_appengine_cloudstorage/cloudstorage/common.py | 120 | 11732 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Helpers shared by cloudstorage_stub and cloudstorage_api."""
__all__ = ['CS_XML_NS',
'CSFileStat',
'dt_str_to_posix',
'local_api_url',
'LOCAL_GCS_ENDPOINT',
'local_run',
'get_access_token',
'get_metadata',
'GCSFileStat',
'http_time_to_posix',
'memory_usage',
'posix_time_to_http',
'posix_to_dt_str',
'set_access_token',
'validate_options',
'validate_bucket_name',
'validate_bucket_path',
'validate_file_path',
]
import calendar
import datetime
from email import utils as email_utils
import logging
import os
import re
try:
from google.appengine.api import runtime
except ImportError:
from google.appengine.api import runtime
_GCS_BUCKET_REGEX_BASE = r'[a-z0-9\.\-_]{3,63}'
_GCS_BUCKET_REGEX = re.compile(_GCS_BUCKET_REGEX_BASE + r'$')
_GCS_BUCKET_PATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'$')
_GCS_PATH_PREFIX_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'.*')
_GCS_FULLPATH_REGEX = re.compile(r'/' + _GCS_BUCKET_REGEX_BASE + r'/.*')
_GCS_METADATA = ['x-goog-meta-',
'content-disposition',
'cache-control',
'content-encoding']
_GCS_OPTIONS = _GCS_METADATA + ['x-goog-acl']
CS_XML_NS = 'http://doc.s3.amazonaws.com/2006-03-01'
LOCAL_GCS_ENDPOINT = '/_ah/gcs'
_access_token = ''
_MAX_GET_BUCKET_RESULT = 1000
def set_access_token(access_token):
"""Set the shared access token to authenticate with Google Cloud Storage.
When set, the library will always attempt to communicate with the
real Google Cloud Storage with this token even when running on dev appserver.
Note the token could expire so it's up to you to renew it.
When absent, the library will automatically request and refresh a token
on appserver, or when on dev appserver, talk to a Google Cloud Storage
stub.
Args:
    access_token: you can get one by running 'gsutil -d ls' and copying the
      str after 'Bearer'.
"""
global _access_token
_access_token = access_token
def get_access_token():
"""Returns the shared access token."""
return _access_token
class GCSFileStat(object):
"""Container for GCS file stat."""
def __init__(self,
filename,
st_size,
etag,
st_ctime,
content_type=None,
metadata=None,
is_dir=False):
"""Initialize.
For files, the non optional arguments are always set.
For directories, only filename and is_dir is set.
Args:
filename: a Google Cloud Storage filename of form '/bucket/filename'.
st_size: file size in bytes. long compatible.
etag: hex digest of the md5 hash of the file's content. str.
st_ctime: posix file creation time. float compatible.
content_type: content type. str.
metadata: a str->str dict of user specified options when creating
the file. Possible keys are x-goog-meta-, content-disposition,
content-encoding, and cache-control.
is_dir: True if this represents a directory. False if this is a real file.
"""
self.filename = filename
self.is_dir = is_dir
self.st_size = None
self.st_ctime = None
self.etag = None
self.content_type = content_type
self.metadata = metadata
if not is_dir:
self.st_size = long(st_size)
self.st_ctime = float(st_ctime)
if etag[0] == '"' and etag[-1] == '"':
etag = etag[1:-1]
self.etag = etag
def __repr__(self):
if self.is_dir:
return '(directory: %s)' % self.filename
return (
'(filename: %(filename)s, st_size: %(st_size)s, '
'st_ctime: %(st_ctime)s, etag: %(etag)s, '
'content_type: %(content_type)s, '
'metadata: %(metadata)s)' %
dict(filename=self.filename,
st_size=self.st_size,
st_ctime=self.st_ctime,
etag=self.etag,
content_type=self.content_type,
metadata=self.metadata))
def __cmp__(self, other):
if not isinstance(other, self.__class__):
raise ValueError('Argument to cmp must have the same type. '
'Expect %s, got %s', self.__class__.__name__,
other.__class__.__name__)
if self.filename > other.filename:
return 1
elif self.filename < other.filename:
return -1
return 0
def __hash__(self):
if self.etag:
return hash(self.etag)
return hash(self.filename)
CSFileStat = GCSFileStat
def get_metadata(headers):
"""Get user defined options from HTTP response headers."""
return dict((k, v) for k, v in headers.iteritems()
if any(k.lower().startswith(valid) for valid in _GCS_METADATA))
def validate_bucket_name(name):
"""Validate a Google Storage bucket name.
Args:
name: a Google Storage bucket name with no prefix or suffix.
Raises:
ValueError: if name is invalid.
"""
_validate_path(name)
if not _GCS_BUCKET_REGEX.match(name):
raise ValueError('Bucket should be 3-63 characters long using only a-z,'
'0-9, underscore, dash or dot but got %s' % name)
def validate_bucket_path(path):
"""Validate a Google Cloud Storage bucket path.
Args:
path: a Google Storage bucket path. It should have form '/bucket'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_BUCKET_PATH_REGEX.match(path):
raise ValueError('Bucket should have format /bucket '
'but got %s' % path)
def validate_file_path(path):
"""Validate a Google Cloud Storage file path.
Args:
path: a Google Storage file path. It should have form '/bucket/filename'.
Raises:
ValueError: if path is invalid.
"""
_validate_path(path)
if not _GCS_FULLPATH_REGEX.match(path):
raise ValueError('Path should have format /bucket/filename '
'but got %s' % path)
def _process_path_prefix(path_prefix):
"""Validate and process a Google Cloud Stoarge path prefix.
Args:
path_prefix: a Google Cloud Storage path prefix of format '/bucket/prefix'
or '/bucket/' or '/bucket'.
Raises:
ValueError: if path is invalid.
Returns:
a tuple of /bucket and prefix. prefix can be None.
"""
_validate_path(path_prefix)
if not _GCS_PATH_PREFIX_REGEX.match(path_prefix):
raise ValueError('Path prefix should have format /bucket, /bucket/, '
'or /bucket/prefix but got %s.' % path_prefix)
bucket_name_end = path_prefix.find('/', 1)
bucket = path_prefix
prefix = None
if bucket_name_end != -1:
bucket = path_prefix[:bucket_name_end]
prefix = path_prefix[bucket_name_end + 1:] or None
return bucket, prefix
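# Illustrative results (the bucket names are hypothetical):
#   _process_path_prefix('/my-bucket/images/') -> ('/my-bucket', 'images/')
#   _process_path_prefix('/my-bucket') -> ('/my-bucket', None)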
def _validate_path(path):
"""Basic validation of Google Storage paths.
Args:
path: a Google Storage path. It should have form '/bucket/filename'
or '/bucket'.
Raises:
ValueError: if path is invalid.
TypeError: if path is not of type basestring.
"""
if not path:
raise ValueError('Path is empty')
if not isinstance(path, basestring):
raise TypeError('Path should be a string but is %s (%s).' %
(path.__class__, path))
def validate_options(options):
"""Validate Google Cloud Storage options.
Args:
options: a str->basestring dict of options to pass to Google Cloud Storage.
Raises:
ValueError: if option is not supported.
TypeError: if option is not of type str or value of an option
is not of type basestring.
"""
if not options:
return
for k, v in options.iteritems():
if not isinstance(k, str):
raise TypeError('option %r should be a str.' % k)
if not any(k.lower().startswith(valid) for valid in _GCS_OPTIONS):
raise ValueError('option %s is not supported.' % k)
if not isinstance(v, basestring):
raise TypeError('value %r for option %s should be of type basestring.' %
(v, k))
def http_time_to_posix(http_time):
"""Convert HTTP time format to posix time.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1
for http time format.
Args:
http_time: time in RFC 2616 format. e.g.
"Mon, 20 Nov 1995 19:12:08 GMT".
Returns:
A float of secs from unix epoch.
"""
if http_time is not None:
return email_utils.mktime_tz(email_utils.parsedate_tz(http_time))
def posix_time_to_http(posix_time):
"""Convert posix time to HTML header time format.
Args:
posix_time: unix time.
Returns:
A datatime str in RFC 2616 format.
"""
if posix_time:
return email_utils.formatdate(posix_time, usegmt=True)
_DT_FORMAT = '%Y-%m-%dT%H:%M:%S'
def dt_str_to_posix(dt_str):
"""format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
"""
parsable, _ = dt_str.split('.')
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
return calendar.timegm(dt.utctimetuple())
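# Illustrative call (value taken from the docstring above): the fractional part
# is discarded by the split, so this returns the whole-second POSIX timestamp
# for 2013-04-12 00:22:27 UTC:
#   dt_str_to_posix('2013-04-12T00:22:27.978Z')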
def posix_to_dt_str(posix):
"""Reverse of str_to_datetime.
This is used by GCS stub to generate GET bucket XML response.
Args:
posix: A float of secs from unix epoch.
Returns:
A datetime str.
"""
dt = datetime.datetime.utcfromtimestamp(posix)
dt_str = dt.strftime(_DT_FORMAT)
return dt_str + '.000Z'
def local_run():
"""Whether we should hit GCS dev appserver stub."""
server_software = os.environ.get('SERVER_SOFTWARE')
if server_software is None:
return True
if 'remote_api' in server_software:
return False
if server_software.startswith(('Development', 'testutil')):
return True
return False
def local_api_url():
"""Return URL for GCS emulation on dev appserver."""
return 'http://%s%s' % (os.environ.get('HTTP_HOST'), LOCAL_GCS_ENDPOINT)
def memory_usage(method):
"""Log memory usage before and after a method."""
def wrapper(*args, **kwargs):
logging.info('Memory before method %s is %s.',
method.__name__, runtime.memory_usage().current())
result = method(*args, **kwargs)
logging.info('Memory after method %s is %s',
method.__name__, runtime.memory_usage().current())
return result
return wrapper
def _add_ns(tagname):
return '{%(ns)s}%(tag)s' % {'ns': CS_XML_NS,
'tag': tagname}
_T_CONTENTS = _add_ns('Contents')
_T_LAST_MODIFIED = _add_ns('LastModified')
_T_ETAG = _add_ns('ETag')
_T_KEY = _add_ns('Key')
_T_SIZE = _add_ns('Size')
_T_PREFIX = _add_ns('Prefix')
_T_COMMON_PREFIXES = _add_ns('CommonPrefixes')
_T_NEXT_MARKER = _add_ns('NextMarker')
_T_IS_TRUNCATED = _add_ns('IsTruncated')
| bsd-3-clause |
johndpope/tensorflow | tensorflow/python/lib/io/tf_record.py | 45 | 3787 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For reading and writing TFRecords files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
class TFRecordCompressionType(object):
"""The type of compression for the record."""
NONE = 0
ZLIB = 1
GZIP = 2
# NOTE(vrv): This will eventually be converted into a proto. to match
# the interface used by the C++ RecordWriter.
class TFRecordOptions(object):
"""Options used for manipulating TFRecord files."""
compression_type_map = {
TFRecordCompressionType.ZLIB: "ZLIB",
TFRecordCompressionType.GZIP: "GZIP",
TFRecordCompressionType.NONE: ""
}
def __init__(self, compression_type):
self.compression_type = compression_type
@classmethod
def get_compression_type_string(cls, options):
if not options:
return ""
return cls.compression_type_map[options.compression_type]
def tf_record_iterator(path, options=None):
"""An iterator that read the records from a TFRecords file.
Args:
path: The path to the TFRecords file.
options: (optional) A TFRecordOptions object.
Yields:
Strings.
Raises:
IOError: If `path` cannot be opened for reading.
"""
compression_type = TFRecordOptions.get_compression_type_string(options)
with errors.raise_exception_on_not_ok_status() as status:
reader = pywrap_tensorflow.PyRecordReader_New(
compat.as_bytes(path), 0, compat.as_bytes(compression_type), status)
if reader is None:
raise IOError("Could not open %s." % path)
while True:
try:
with errors.raise_exception_on_not_ok_status() as status:
reader.GetNext(status)
except errors.OutOfRangeError:
break
yield reader.record()
reader.Close()
class TFRecordWriter(object):
"""A class to write records to a TFRecords file.
This class implements `__enter__` and `__exit__`, and can be used
in `with` blocks like a normal file.
"""
# TODO(josh11b): Support appending?
def __init__(self, path, options=None):
"""Opens file `path` and creates a `TFRecordWriter` writing to it.
Args:
path: The path to the TFRecords file.
options: (optional) A TFRecordOptions object.
Raises:
IOError: If `path` cannot be opened for writing.
"""
compression_type = TFRecordOptions.get_compression_type_string(options)
with errors.raise_exception_on_not_ok_status() as status:
self._writer = pywrap_tensorflow.PyRecordWriter_New(
compat.as_bytes(path), compat.as_bytes(compression_type), status)
def __enter__(self):
"""Enter a `with` block."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Exit a `with` block, closing the file."""
self.close()
def write(self, record):
"""Write a string record to the file.
Args:
record: str
"""
self._writer.WriteRecord(record)
def close(self):
"""Close the file."""
self._writer.Close()
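# Illustrative usage sketch (path and payload are hypothetical):
#   with TFRecordWriter('/tmp/data.tfrecord') as writer:
#     writer.write(b'serialized example bytes')
#   for record in tf_record_iterator('/tmp/data.tfrecord'):
#     print(record)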
| apache-2.0 |
maciekcc/tensorflow | tensorflow/contrib/testing/python/framework/util_test.py | 198 | 4128 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python.training import summary_io
def assert_summary(expected_tags, expected_simple_values, summary_proto):
"""Asserts summary contains the specified tags and values.
Args:
expected_tags: All tags in summary.
expected_simple_values: Simply values for some tags.
summary_proto: Summary to validate.
Raises:
ValueError: if expectations are not met.
"""
actual_tags = set()
for value in summary_proto.value:
actual_tags.add(value.tag)
if value.tag in expected_simple_values:
expected = expected_simple_values[value.tag]
actual = value.simple_value
np.testing.assert_almost_equal(
actual, expected, decimal=2, err_msg=value.tag)
expected_tags = set(expected_tags)
if expected_tags != actual_tags:
raise ValueError('Expected tags %s, got %s.' % (expected_tags, actual_tags))
def to_summary_proto(summary_str):
"""Create summary based on latest stats.
Args:
summary_str: Serialized summary.
Returns:
summary_pb2.Summary.
Raises:
    ValueError: if summary_str is not a valid serialized summary.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
return summary
# TODO(ptucker): Move to a non-test package?
def latest_event_file(base_dir):
"""Find latest event file in `base_dir`.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
File path, or `None` if none exists.
"""
file_paths = glob.glob(os.path.join(base_dir, 'events.*'))
return sorted(file_paths)[-1] if file_paths else None
def latest_events(base_dir):
"""Parse events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
Iterable of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
file_path = latest_event_file(base_dir)
return summary_io.summary_iterator(file_path) if file_path else []
def latest_summaries(base_dir):
"""Parse summary events from latest event file in base_dir.
Args:
    base_dir: Base directory in which TF event files are stored.
Returns:
List of event protos.
Raises:
ValueError: if no event files exist under base_dir.
"""
return [e for e in latest_events(base_dir) if e.HasField('summary')]
def simple_values_from_events(events, tags):
"""Parse summaries from events with simple_value.
Args:
events: List of tensorflow.Event protos.
tags: List of string event tags corresponding to simple_value summaries.
Returns:
dict of tag:value.
Raises:
ValueError: if a summary with a specified tag does not contain simple_value.
"""
step_by_tag = {}
value_by_tag = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
tag = v.tag
if tag in tags:
if not v.HasField('simple_value'):
raise ValueError('Summary for %s is not a simple_value.' % tag)
# The events are mostly sorted in step order, but we explicitly check
# just in case.
if tag not in step_by_tag or e.step > step_by_tag[tag]:
step_by_tag[tag] = e.step
value_by_tag[tag] = v.simple_value
return value_by_tag
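# Illustrative call (tag name and value are hypothetical): given events parsed
# from the latest event file, this returns the simple_value recorded at the
# highest step for each requested tag, e.g. {'loss': 0.25}:
#   simple_values_from_events(latest_events(base_dir), ['loss'])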
| apache-2.0 |
aioworkers/aioworkers | aioworkers/core/context.py | 1 | 17806 | import asyncio
import contextlib
import inspect
import logging.config
import os
from collections import OrderedDict
from functools import wraps
from typing import (
Awaitable,
Callable,
FrozenSet,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from ..utils import import_name
from .base import AbstractEntity, NameLogger
from .config import ValueExtractor
T = TypeVar('T')
TSeq = Union[Sequence, Set, FrozenSet]
DOT = '.'
class Octopus(MutableMapping):
def _create_item(self):
return Octopus()
def _get_item(self, key, create):
sp = key.split(DOT, 1)
k = sp[0]
if k in self.__dict__:
v = self.__dict__[k]
elif create:
v = self._create_item()
self[k] = v
else:
raise KeyError(key)
if len(sp) == 1:
return v
if isinstance(v, Octopus):
return v._get_item(sp[-1], create)
try:
return getattr(v, sp[-1])
except AttributeError:
return v[sp[-1]]
def items(self):
return self.__dict__.items()
def __getitem__(self, key):
if not isinstance(key, str):
return
return self._get_item(key, False)
def __setitem__(self, key, value):
if not isinstance(key, str):
return
sp = key.rsplit(DOT, 1)
if len(sp) == 2:
f = self._get_item(sp[0], True)
else:
f = self
setattr(f, sp[-1], value)
def __delitem__(self, key): # pragma: no cover
pass
def __iter__(self): # pragma: no cover
pass
def __len__(self): # pragma: no cover
return len(self.__dict__)
def __repr__(self, *, indent=1, header=False):
if os.environ.get('AIOWORKERS_MODE') != 'console':
return '{cls}({id}, attrs=[{attrs}])'.format(
cls=self.__class__.__name__,
id=id(self),
attrs=', '.join(
x for x in self.__dict__
if not x.startswith('_')
),
)
result = []
if header:
result.extend([' ' * indent, '<', self.__class__.__name__, '>\n'])
indent += 1
for k, v in sorted(self.__dict__.items()):
if k.startswith('_'):
continue
result.append(' ' * indent)
result.append(k)
result.append(': ')
if isinstance(v, Octopus):
result.append('\n')
result.append(v.__repr__(indent=indent + 1, header=False))
else:
result.append(str(v))
result.append('\n')
return ''.join(result)
def find_iter(self, cls, *, exclude=None):
# type: (Type[T], Optional[Set[int]]) -> Iterable[Tuple[str, T]]
can_add = False
if not exclude:
can_add = True
exclude = {id(self)}
for pp, obj in self.items():
if isinstance(obj, Octopus):
identy = id(obj)
if identy in exclude:
continue
if can_add:
exclude.add(identy)
for pc, obj in obj.find_iter(cls, exclude=exclude):
yield DOT.join((pp, pc)), obj
if isinstance(obj, cls):
yield pp, obj
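# Illustrative dotted-key behaviour of Octopus (sketch only):
#   o = Octopus()
#   o['a.b.c'] = 1
#   assert o['a.b.c'] == 1
#   assert o['a']['b']['c'] == 1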
class Signal:
LOG_RUN = 'To emit in %s'
LOG_END = '[%s/%s] End for %s'
def __init__(self, context: 'Context', name: str = None):
self._counter = 0
self._signals = [] # type: List
self._context = context
self._name = name or str(id(self))
self._logger = NameLogger(
logging.getLogger('aioworkers.signals'),
{
'name': '.'.join([
'aioworkers.signals', self._name,
]),
}
)
def append(self, signal: Callable, groups: TSeq = ()):
if groups:
groups = {str(g) for g in groups}
self._signals.append((signal, groups))
async def _run_async(self, name: str, awaitable: Awaitable) -> None:
self._logger.info(self.LOG_RUN, name)
await awaitable
self._counter += 1
self._logger.info(
self.LOG_END,
self._counter, len(self._signals),
name,
)
def _run_sync(self, name: str, func: Callable) -> None:
params = inspect.signature(func).parameters
self._logger.info(self.LOG_RUN, name)
try:
if 'context' in params:
func(self._context)
else:
func()
self._counter += 1
self._logger.info(
self.LOG_END,
self._counter, len(self._signals),
name,
)
except Exception:
self._logger.exception('Error on run signal %s', self._name)
def _send(self, group_resolver: 'GroupResolver') -> List[Awaitable]:
self._counter = 0
coros = [] # type: List
for i, g in self._signals:
if not group_resolver.match(g):
continue
instance = getattr(i, '__self__', None)
name = instance and repr(instance) or repr(i)
if isinstance(instance, AbstractEntity):
name = instance.config.get('name') or repr(instance)
if asyncio.iscoroutinefunction(i):
params = inspect.signature(i).parameters
if 'context' in params:
awaitable = i(self._context)
else:
awaitable = i()
awaitable = self._run_async(name, awaitable)
coro = wraps(i)(lambda x: x)(awaitable)
elif asyncio.iscoroutine(i):
coro = self._run_async(name, i)
elif callable(i):
self._run_sync(name, i)
continue
else:
continue
coros.append(coro)
return coros
def send(self, group_resolver, *, coroutine=True):
coros = self._send(group_resolver)
if coroutine:
return self._context.wait_all(coros)
class GroupResolver:
def __init__(
self,
include=None,
exclude=None,
all_groups=False,
default=True,
):
self._include = frozenset(include or ())
self._exclude = frozenset(exclude or ())
self._all = all_groups
self._default = default
def match(self, groups):
if not groups:
return self._default
groups = {str(e) for e in groups}
exclude = groups.intersection(self._exclude)
include = groups.intersection(self._include)
if self._all:
if exclude:
return ()
else:
if not include:
return ()
return groups
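# Illustrative matching (group names are hypothetical):
#   gr = GroupResolver(include=['web'])
#   gr.match(['web']) -> {'web'}  (truthy: the entity is included)
#   gr.match(['db'])  -> ()       (falsy: the entity is skipped)
#   gr.match(None)    -> True     (no groups declared: falls back to `default`)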
class ContextProcessor:
def __init__(self, context: 'Context', path: str, value: ValueExtractor):
self.context = context
self.path = path
self.value = value
@classmethod
def match(cls, context: 'Context', path: str, value: ValueExtractor):
raise NotImplementedError
async def process(self):
raise NotImplementedError
class LoggingContextProcessor(ContextProcessor):
key = 'logging'
process = None
@classmethod
def match(cls, context, path, value):
if path == cls.key and value and isinstance(value, Mapping):
m = cls(context, path, value)
m.configure(value)
return m
def configure(self, value):
if value:
cfg = dict(value)
cfg.setdefault('version', 1)
logging.config.dictConfig(cfg)
class GroupsContextProcessor(ContextProcessor):
key = 'groups'
process = None
@classmethod
def match(cls, context, path, value):
if not isinstance(value, Mapping):
return
groups = value.get(cls.key)
if not context._group_resolver.match(groups):
return cls(context, path, value)
class EntityContextProcessor(ContextProcessor):
key = 'cls'
def __init__(self, context: 'Context', path: str, value: ValueExtractor):
super().__init__(context, path, value)
cls = import_name(value[self.key])
if issubclass(cls, AbstractEntity):
entity = cls(None)
else:
try:
signature = inspect.signature(cls)
signature.bind(config=None, context=None, loop=None)
except TypeError as e:
raise TypeError(
'Error while creating entity on {} from {}: {}'.format(
path, value[self.key], e))
except ValueError as e:
raise ValueError(
'Error while checking entity on {} from {}: {}'.format(
path, value[self.key], e))
entity = cls(value, context=context, loop=context.loop)
context[path] = entity
self.entity = entity
@classmethod
def match(cls, context: 'Context', path: str,
value: ValueExtractor) -> Optional[ContextProcessor]:
if isinstance(value, Mapping) and cls.key in value:
return cls(context, path, value)
return None
async def process(self):
await self.entity.init()
class InstanceEntityContextProcessor(EntityContextProcessor):
key = 'obj'
def __init__(self, context: 'Context', path: str, value: ValueExtractor):
ContextProcessor.__init__(self, context, path, value)
self.entity = getattr(context, path, None)
if isinstance(self.entity, AbstractEntity):
self.entity.set_config(value.new_parent(name=path))
elif not isinstance(self.entity, Mapping):
entity = import_name(value[self.key])
context[path] = entity
@classmethod
def match(cls, context: 'Context', path: str,
value: ValueExtractor) -> Optional[ContextProcessor]:
e = context[path]
if isinstance(e, AbstractEntity):
return cls(context, path, value)
else:
return super().match(context, path, value)
async def process(self):
if isinstance(self.entity, AbstractEntity):
await self.entity.init()
class FuncContextProcessor(ContextProcessor):
key = 'func'
process = None
def __init__(self, context: 'Context', path: str, value: ValueExtractor):
super().__init__(context, path, value)
func = import_name(value[self.key])
args = value.get('args', ())
kwargs = value.get('kwargs', {})
context[path] = func(*args, **kwargs)
@classmethod
def match(cls, context: 'Context', path: str,
value: ValueExtractor) -> Optional[ContextProcessor]:
if isinstance(value, Mapping) and cls.key in value:
return cls(context, path, value)
return None
class RootContextProcessor(ContextProcessor):
processors = (
LoggingContextProcessor,
GroupsContextProcessor,
InstanceEntityContextProcessor,
EntityContextProcessor,
FuncContextProcessor,
)
def __init__(self, context, path=None, value=None):
super().__init__(context, path, value)
self._built = False
self.on_ready = Signal(context, name='ready')
self.processors = OrderedDict((i.key, i) for i in self.processors)
def __iter__(self):
yield from self.processors.values()
def processing(self, config, path=None):
for k, v in config.items():
if '/' in k:
continue
p = '.'.join(i for i in (path, k) if i)
for processor in self:
m = processor.match(self.context, p, v)
if m is None:
continue
if m.process:
groups = None
if isinstance(v, Mapping):
groups = v.get('groups')
self.on_ready.append(m.process, groups)
break
else:
if isinstance(v, Mapping):
self.processing(v, p)
def build(self, config):
if not self._built:
if config is None:
raise RuntimeError('Config is empty')
self.value = config
self.processing(self.value)
self._built = True
async def process(self, config=None):
self.build(config)
await self.on_ready.send(self.context._group_resolver)
class Context(AbstractEntity, Octopus):
def __init__(self, *args, **kwargs):
self._group_resolver = kwargs.pop('group_resolver', GroupResolver())
self._on_connect = Signal(self, name='connect')
self._on_start = Signal(self, name='start')
self._on_stop = Signal(self, name='stop')
self._on_disconnect = Signal(self, name='disconnect')
self._on_cleanup = Signal(self, name='cleanup')
self.logger = logging.getLogger('aioworkers')
root_processor = kwargs.pop('root_processor', RootContextProcessor)
self.processors = root_processor(self)
super().__init__(*args, **kwargs)
def set_group_resolver(self, gr):
self._group_resolver = gr
def set_config(self, config) -> None:
self._config = config
def set_loop(self, loop):
if self._loop is not None:
raise RuntimeError('Loop already set')
self._set_loop(loop)
for path, obj in self.find_iter(AbstractEntity):
obj._set_loop(loop)
@contextlib.contextmanager
def processes(self):
gr = GroupResolver(all_groups=True)
self.set_group_resolver(gr)
self.processors.build(self.config)
yield
self.on_cleanup.send(gr, coroutine=False)
@property
def on_connect(self):
return self._on_connect
@property
def on_start(self):
return self._on_start
@property
def on_stop(self):
return self._on_stop
@property
def on_disconnect(self):
return self._on_disconnect
@property
def on_cleanup(self):
return self._on_cleanup
async def init(self):
if self._loop is None:
self.set_loop(asyncio.get_event_loop())
await self.processors.process(self.config)
async def wait_all(self, coros, timeout=None):
if not coros:
return
d, p = await asyncio.wait(coros, loop=self.loop, timeout=timeout)
assert not p, '\n'.join(map(repr, p))
for f in d:
if f.exception():
self.logger.exception('ERROR', exc_info=f.exception())
async def connect(self):
await self.on_connect.send(self._group_resolver)
async def start(self):
await self.on_start.send(self._group_resolver)
async def stop(self):
await self.on_stop.send(self._group_resolver)
async def disconnect(self):
await self.on_disconnect.send(self._group_resolver)
def run_forever(self):
try:
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.loop.close()
def __dir__(self) -> List[str]:
result = []
if self.config:
result.extend(self.config)
result.extend(
k for k in super().__dir__()
if k not in self.config
)
else:
result.extend(super().__dir__())
return result
def __getitem__(self, item):
if item is None:
return
elif isinstance(item, str):
try:
return super().__getitem__(item)
except Exception:
pass
try:
return self._config[item]
except Exception:
pass
try:
return import_name(item)
except Exception:
pass
raise KeyError(item)
def _setattr(self, key, value, method):
if isinstance(value, AbstractEntity):
value.set_context(self)
if self.config and self.config.get(key):
config = self.config[key].new_parent(name=key)
value.set_config(config)
if isinstance(key, str) and DOT not in key:
self.__dict__[key] = value
else:
return method(key, value)
def __setitem__(self, key, value):
self._setattr(key, value, super().__setitem__)
def __setattr__(self, key, value):
self._setattr(key, value, super().__setattr__)
def __getattr__(self, item):
try:
return self._config[item]
except KeyError:
raise AttributeError(item)
def __enter__(self):
if self._loop is None:
self.set_loop(asyncio.get_event_loop())
self.loop.run_until_complete(self.__aenter__())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.loop.is_closed():
self.loop.run_until_complete(
self.__aexit__(exc_type, exc_val, exc_tb),
)
async def __aenter__(self):
await self.init()
await self.connect()
await self.start()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.stop()
await self.disconnect()
await self._on_cleanup.send(self._group_resolver)
def get_object(self, path):
if path.startswith('.'):
return self[path[1:]]
return import_name(path)
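# Illustrative usage sketch: a minimal example of driving a Context through
# the lifecycle signals defined above. It assumes a Config-like mapping can be
# passed as ``config=`` (as set_config/AbstractEntity suggest); the names below
# are placeholders, not a definitive API.
#
#     async def main(config):
#         async with Context(config=config) as ctx:  # init -> connect -> start
#             ctx.logger.info('context is running')
#         # leaving the block awaits the stop, disconnect and cleanup signals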
| apache-2.0 |
aESeguridad/GERE | venv/local/lib/python2.7/codecs.py | 62 | 35172 | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import __builtin__, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError, why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = '\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = '\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = '\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = '\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = '\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can be
passed piece by piece to the encode() method. The IncrementalEncoder remembers
the state of the Encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
self.buffer = "" # unencoded input that is kept between calls to encode()
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can be
passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Creates a IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete byte
sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
self.buffer = "" # undecoded input that is kept between calls to decode()
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = ""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = ""
# For str->str decoding this will stay a str
# For str->unicode decoding the first read will promote it to unicode
self.charbuffer = ""
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = "".join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
elif size >= 0:
if len(self.charbuffer) >= size:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError, exc:
if firstline:
newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = ""
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(False)[0]
return line
readsize = size or 72
line = ""
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if data.endswith("\r"):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = "".join(lines[1:]) + self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(False)[0]
break
if readsize<8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = ""
self.charbuffer = u""
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def next(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def next(self):
""" Return the next decoded line from the input stream."""
return self.reader.next()
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance and then return encoded data to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(1)
def next(self):
""" Return the next decoded line from the input stream."""
data = self.reader.next()
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None:
if 'U' in mode:
# No automatic conversion of '\n' is done on reading and writing
mode = mode.strip().replace('U', '')
if mode[:1] not in set('rwa'):
mode = 'r' + mode
if 'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = __builtin__.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using a IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
Decodes the input strings from the iterator using a IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode("", True)
if output:
yield output
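# Illustrative example: round-tripping chunks through the incremental helpers
# above (a minimal sketch, not an exhaustive demonstration).
#
#     chunks = [u"Hello ", u"w\xf6rld"]
#     encoded = list(iterencode(iter(chunks), "utf-8"))
#     decoded = u"".join(iterdecode(iter(encoded), "utf-8"))
#     assert decoded == u"Hello w\xf6rld"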
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
res = {}
for i in rng:
res[i]=i
return res
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
        multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
# In --disable-unicode builds, these error handler are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| gpl-3.0 |
dreyou/ebstarter | reciever.py | 1 | 3846 | #!/usr/bin/env python
import pika
import sys
import signal
import json
import logging
from optparse import OptionParser
import eblocal
def createApplication(command):
res = eblocal.createApp(command["name"], command["source"])
if res is None:
logging.error("Can't create application")
return
res = eblocal.createEnv(command["name"], command["source"])
if res is None:
logging.error("Can't create application environment, deleting application")
eblocal.deleteApp(command["name"])
return
logging.info("Application: "+command["name"]+" created")
def rebuildApplicationEnvironment(command):
res = eblocal.getEnv(command["name"])
if res is None:
logging.error("No application environment present, creating")
createApplication(command)
return
res = eblocal.rebuildEnv(command["name"])
if res is None:
logging.error("Can't rebuild environment")
def deleteApplication(command):
res = eblocal.deleteApp(command["name"])
if res is None:
logging.error("Can't delete application")
def deleteAgedApplication(command):
age = eblocal.getEnvAge(command["name"])
if age is None:
logging.error("Can't detect environment age")
return
if age < options.max_age:
return
logging.info("Environment age > "+str(options.max_age)+" hrs, deleting.")
res = eblocal.deleteApp(command["name"])
if res is None:
logging.error("Can't delete application")
operations = dict()
operations['create'] = createApplication
operations['rebuild'] = rebuildApplicationEnvironment
operations['delete'] = deleteApplication
operations['deleteaged'] = deleteAgedApplication
def on_message(channel, method_frame, header_frame, body):
logging.debug(method_frame.delivery_tag)
logging.debug(body)
logging.debug(header_frame)
try:
command = json.loads(body)
logging.info("Command: "+command["operation"]+" for: "+command["name"]+", source is: "+command["source"])
if command["operation"] in operations:
if options.run == "yes":
logging.info("Run operation: "+command["operation"])
operations[command["operation"]](command)
else:
logging.info("Simulate run operation: "+command["operation"])
    except Exception:
logging.exception("Error while running command: "+str(sys.exc_info()[0]))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def signal_handler(sig, frame):
logging.info("Interrupted with: "+str(sig)+", exit now!")
channel.stop_consuming()
connection.close()
sys.exit(0)
parser = OptionParser()
parser.add_option("-r", "--run", type="string", help="if not set to \"yes\", do really nothing, just accept messages", dest="run", default="no")
parser.add_option("-q", "--queue", help="queue name", type="string", dest="queue", default="test")
parser.add_option("-l", "--log-level", help="log level", dest="log_level", type="int", default=1)
parser.add_option("-m", "--max-age", help="maximum application age in hours", dest="max_age", type="int", default=6)
(options, args) = parser.parse_args()
if options.log_level == 1:
    log_level = logging.INFO
elif options.log_level >= 2:
    log_level = logging.DEBUG
else:
    # guard against unexpected values (e.g. 0) so log_level is always defined
    log_level = logging.WARNING
logging.basicConfig(level=log_level)
logging.info("Start reciever on queue: "+options.queue)
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
connection = pika.BlockingConnection()
channel = connection.channel()
channel.queue_declare(queue=options.queue)
channel.exchange_declare(exchange='commands', type='fanout')
channel.queue_bind(exchange='commands', queue=options.queue)
channel.basic_consume(on_message, queue=options.queue)
channel.start_consuming()
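# Illustrative producer-side sketch: a message this receiver understands can be
# published to the same fanout exchange. The default connection parameters, the
# application name and the source URL below are placeholders only.
#
#     import json
#     import pika
#     conn = pika.BlockingConnection()
#     ch = conn.channel()
#     ch.exchange_declare(exchange='commands', type='fanout')
#     ch.basic_publish(exchange='commands', routing_key='',
#                      body=json.dumps({'operation': 'create',
#                                       'name': 'myapp',
#                                       'source': 'git://example.com/myapp.git'}))
#     conn.close()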
| mit |
nirmeshk/oh-mainline | vendor/packages/gdata/tests/gdata_tests/blogger_test.py | 128 | 3855 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import unittest
from gdata import test_data
import gdata.blogger
import atom
class BlogEntryTest(unittest.TestCase):
def testBlogEntryFromString(self):
entry = gdata.blogger.BlogEntryFromString(test_data.BLOG_ENTRY)
self.assertEquals(entry.GetBlogName(), 'blogName')
self.assertEquals(entry.GetBlogId(), 'blogID')
self.assertEquals(entry.title.text, 'Lizzy\'s Diary')
def testBlogPostFeedFromString(self):
feed = gdata.blogger.BlogPostFeedFromString(test_data.BLOG_POSTS_FEED)
self.assertEquals(len(feed.entry), 1)
self.assert_(isinstance(feed, gdata.blogger.BlogPostFeed))
self.assert_(isinstance(feed.entry[0], gdata.blogger.BlogPostEntry))
self.assertEquals(feed.entry[0].GetPostId(), 'postID')
self.assertEquals(feed.entry[0].GetBlogId(), 'blogID')
self.assertEquals(feed.entry[0].title.text, 'Quite disagreeable')
def testCommentFeedFromString(self):
feed = gdata.blogger.CommentFeedFromString(test_data.BLOG_COMMENTS_FEED)
self.assertEquals(len(feed.entry), 1)
self.assert_(isinstance(feed, gdata.blogger.CommentFeed))
self.assert_(isinstance(feed.entry[0], gdata.blogger.CommentEntry))
self.assertEquals(feed.entry[0].GetBlogId(), 'blogID')
self.assertEquals(feed.entry[0].GetCommentId(), 'commentID')
self.assertEquals(feed.entry[0].title.text, 'This is my first comment')
self.assertEquals(feed.entry[0].in_reply_to.source,
'http://blogName.blogspot.com/feeds/posts/default/postID')
self.assertEquals(feed.entry[0].in_reply_to.ref,
'tag:blogger.com,1999:blog-blogID.post-postID')
self.assertEquals(feed.entry[0].in_reply_to.href,
'http://blogName.blogspot.com/2007/04/first-post.html')
self.assertEquals(feed.entry[0].in_reply_to.type, 'text/html')
def testIdParsing(self):
entry = gdata.blogger.BlogEntry()
entry.id = atom.Id(
text='tag:blogger.com,1999:user-146606542.blog-4023408167658848')
self.assertEquals(entry.GetBlogId(), '4023408167658848')
entry.id = atom.Id(text='tag:blogger.com,1999:blog-4023408167658848')
self.assertEquals(entry.GetBlogId(), '4023408167658848')
class InReplyToTest(unittest.TestCase):
def testToAndFromString(self):
in_reply_to = gdata.blogger.InReplyTo(href='http://example.com/href',
ref='http://example.com/ref', source='http://example.com/my_post',
type='text/html')
xml_string = str(in_reply_to)
parsed = gdata.blogger.InReplyToFromString(xml_string)
self.assertEquals(parsed.source, in_reply_to.source)
self.assertEquals(parsed.href, in_reply_to.href)
self.assertEquals(parsed.ref, in_reply_to.ref)
self.assertEquals(parsed.type, in_reply_to.type)
class CommentEntryTest(unittest.TestCase):
def testToAndFromString(self):
comment = gdata.blogger.CommentEntry(content=atom.Content(text='Nifty!'),
in_reply_to=gdata.blogger.InReplyTo(
source='http://example.com/my_post'))
parsed = gdata.blogger.CommentEntryFromString(str(comment))
self.assertEquals(parsed.in_reply_to.source, comment.in_reply_to.source)
self.assertEquals(parsed.content.text, comment.content.text)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
carltongibson/django-redis-cache | redis_cache/utils.py | 2 | 5747 | import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from redis.connection import SSLConnection
from redis_cache.compat import (
smart_text, python_2_unicode_compatible, parse_qs, urlparse
)
try:
basestring
except NameError:
basestring = str
@python_2_unicode_compatible
class CacheKey(object):
"""
A stub string class that we can use to check if a key was created already.
"""
def __init__(self, key, versioned_key):
self._original_key = key
self._versioned_key = versioned_key
def __eq__(self, other):
return self._versioned_key == other
def __unicode__(self):
return smart_text(self._versioned_key)
def __hash__(self):
return hash(self._versioned_key)
__repr__ = __str__ = __unicode__
def get_servers(location):
"""Returns a list of servers given the server argument passed in from
Django.
"""
if isinstance(location, basestring):
servers = location.split(',')
elif hasattr(location, '__iter__'):
servers = location
else:
raise ImproperlyConfigured(
'"server" must be an iterable or string'
)
return servers
def import_class(path):
module_name, class_name = path.rsplit('.', 1)
try:
module = import_module(module_name)
except ImportError:
raise ImproperlyConfigured('Could not find module "%s"' % module_name)
else:
try:
return getattr(module, class_name)
except AttributeError:
raise ImproperlyConfigured('Cannot import "%s"' % class_name)
def parse_connection_kwargs(server, db=None, **kwargs):
"""
Return a connection pool configured from the given URL.
For example::
redis://[:password]@localhost:6379/0
rediss://[:password]@localhost:6379/0
unix://[:password]@/path/to/socket.sock?db=0
Three URL schemes are supported:
redis:// creates a normal TCP socket connection
rediss:// creates a SSL wrapped TCP socket connection
unix:// creates a Unix Domain Socket connection
There are several ways to specify a database number. The parse function
will return the first specified option:
1. A ``db`` querystring option, e.g. redis://localhost?db=0
2. If using the redis:// scheme, the path argument of the url, e.g.
redis://localhost/0
3. The ``db`` argument to this function.
If none of these options are specified, db=0 is used.
Any additional querystring arguments and keyword arguments will be
passed along to the ConnectionPool class's initializer. In the case
of conflicting arguments, querystring arguments always win.
NOTE: taken from `redis.ConnectionPool.from_url` in redis-py
"""
kwargs['unix_socket_path'] = ''
if '://' in server:
url = server
url_string = url
url = urlparse(url)
qs = ''
# in python2.6, custom URL schemes don't recognize querystring values
# they're left as part of the url.path.
if '?' in url.path and not url.query:
# chop the querystring including the ? off the end of the url
# and reparse it.
qs = url.path.split('?', 1)[1]
url = urlparse(url_string[:-(len(qs) + 1)])
else:
qs = url.query
url_options = {}
for name, value in parse_qs(qs).items():
if value and len(value) > 0:
url_options[name] = value[0]
# We only support redis:// and unix:// schemes.
if url.scheme == 'unix':
url_options.update({
'password': url.password,
'unix_socket_path': url.path,
})
else:
url_options.update({
'host': url.hostname,
'port': int(url.port or 6379),
'password': url.password,
})
# If there's a path argument, use it as the db argument if a
# querystring value wasn't specified
if 'db' not in url_options and url.path:
try:
url_options['db'] = int(url.path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
url_options['connection_class'] = SSLConnection
# last shot at the db value
url_options['db'] = int(url_options.get('db', db or 0))
# update the arguments from the URL values
kwargs.update(url_options)
        # backwards compatibility
if 'charset' in kwargs:
warnings.warn(DeprecationWarning(
'"charset" is deprecated. Use "encoding" instead'))
kwargs['encoding'] = kwargs.pop('charset')
if 'errors' in kwargs:
warnings.warn(DeprecationWarning(
'"errors" is deprecated. Use "encoding_errors" instead'))
kwargs['encoding_errors'] = kwargs.pop('errors')
else:
unix_socket_path = None
if ':' in server:
host, port = server.rsplit(':', 1)
try:
port = int(port)
except (ValueError, TypeError):
raise ImproperlyConfigured(
"{0} from {1} must be an integer".format(
repr(port),
server
)
)
else:
host, port = None, None
unix_socket_path = server
kwargs.update(
host=host,
port=port,
unix_socket_path=unix_socket_path,
db=db,
)
return kwargs
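# Illustrative example of the URL form described in the docstring above
# (host, port and password values are placeholders):
#
#     >>> kwargs = parse_connection_kwargs('redis://:secret@localhost:6380/2')
#     >>> kwargs['host'], kwargs['port'], kwargs['password'], kwargs['db']
#     ('localhost', 6380, 'secret', 2)
#     >>> kwargs['unix_socket_path']
#     ''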
| bsd-3-clause |
EdgarSun/Django-Demo | django/template/context.py | 97 | 6298 | from copy import copy
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.http import HttpRequest
# Cache of actual callables.
_standard_context_processors = None
# We need the CSRF processor no matter what the user has in their settings,
# because otherwise it is a security vulnerability, and we can't afford to leave
# this to human error or failure to read migration instructions.
_builtin_context_processors = ('django.core.context_processors.csrf',)
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class EmptyClass(object):
# No-op class which takes no args to its __init__ method, to help implement
# __copy__
pass
class BaseContext(object):
def __init__(self, dict_=None):
dict_ = dict_ or {}
self.dicts = [dict_]
def __copy__(self):
duplicate = EmptyClass()
duplicate.__class__ = self.__class__
duplicate.__dict__ = self.__dict__.copy()
duplicate.dicts = duplicate.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self):
d = {}
self.dicts.append(d)
return d
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True, current_app=None, use_l10n=None):
self.autoescape = autoescape
self.use_l10n = use_l10n
self.current_app = current_app
self.render_context = RenderContext()
super(Context, self).__init__(dict_)
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
self.dicts.append(other_dict)
return other_dict
def new(self, values=None):
"""
Returns a new Context with the same 'autoescape' value etc, but with
only the values given in 'values' stored.
"""
return self.__class__(dict_=values, autoescape=self.autoescape,
current_app=self.current_app, use_l10n=self.use_l10n)
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RequestContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
d = self.dicts[-1]
if key in d:
return d[key]
return otherwise
# This is a function rather than module-level procedural code because we only
# want it to execute if somebody uses RequestContext.
def get_standard_processors():
from django.conf import settings
global _standard_context_processors
if _standard_context_processors is None:
processors = []
collect = []
collect.extend(_builtin_context_processors)
collect.extend(settings.TEMPLATE_CONTEXT_PROCESSORS)
for path in collect:
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing request processor module %s: "%s"' % (module, e))
try:
func = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" callable request processor' % (module, attr))
processors.append(func)
_standard_context_processors = tuple(processors)
return _standard_context_processors
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in TEMPLATE_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict=None, processors=None, current_app=None, use_l10n=None):
Context.__init__(self, dict, current_app=current_app, use_l10n=use_l10n)
if processors is None:
processors = ()
else:
processors = tuple(processors)
for processor in get_standard_processors() + processors:
self.update(processor(request))
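# Illustrative example of the context-stack behaviour defined above:
#
#     >>> c = Context({'a': 1})
#     >>> c.update({'a': 2})       # pushes a new dict onto the stack
#     {'a': 2}
#     >>> c['a']
#     2
#     >>> c.pop()
#     {'a': 2}
#     >>> c['a']
#     1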
| mit |
tudorvio/nova | nova/virt/xenapi/image/bittorrent.py | 23 | 4771 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import six.moves.urllib.parse as urlparse
from nova.i18n import _, _LW
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger(__name__)
xenapi_torrent_opts = [
cfg.StrOpt('torrent_base_url',
help='Base URL for torrent files; must contain a slash'
' character (see RFC 1808, step 6)'),
cfg.FloatOpt('torrent_seed_chance',
default=1.0,
help='Probability that peer will become a seeder.'
' (1.0 = 100%)'),
cfg.IntOpt('torrent_seed_duration',
default=3600,
help='Number of seconds after downloading an image via'
' BitTorrent that it should be seeded for other peers.'),
cfg.IntOpt('torrent_max_last_accessed',
default=86400,
help='Cached torrent files not accessed within this number of'
' seconds can be reaped'),
cfg.IntOpt('torrent_listen_port_start',
default=6881,
help='Beginning of port range to listen on'),
cfg.IntOpt('torrent_listen_port_end',
default=6891,
help='End of port range to listen on'),
cfg.IntOpt('torrent_download_stall_cutoff',
default=600,
help='Number of seconds a download can remain at the same'
' progress percentage w/o being considered a stall'),
cfg.IntOpt('torrent_max_seeder_processes_per_host',
default=1,
help='Maximum number of seeder processes to run concurrently'
' within a given dom0. (-1 = no limit)')
]
CONF = cfg.CONF
CONF.register_opts(xenapi_torrent_opts, 'xenserver')
class BittorrentStore(object):
@staticmethod
def _lookup_torrent_url_fn():
"""Load a "fetcher" func to get the right torrent URL.
"""
if CONF.xenserver.torrent_base_url:
if '/' not in CONF.xenserver.torrent_base_url:
LOG.warn(_LW('Value specified in conf file for'
' xenserver.torrent_base_url does not contain a'
' slash character, therefore it will not be used'
' as part of the torrent URL. Specify a valid'
' base URL as defined by RFC 1808 (see step 6).'))
def _default_torrent_url_fn(image_id):
return urlparse.urljoin(CONF.xenserver.torrent_base_url,
"%s.torrent" % image_id)
return _default_torrent_url_fn
raise RuntimeError(_('Cannot create default bittorrent URL'
' without xenserver.torrent_base_url'
' configuration option set.'))
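    # Illustrative note: with torrent_base_url set to, e.g.,
    # 'http://torrents.example.com/seeds/' (a placeholder), the default
    # function above maps image id 'abc' to
    # 'http://torrents.example.com/seeds/abc.torrent' via urljoin.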
def download_image(self, context, session, instance, image_id):
params = {}
params['image_id'] = image_id
params['uuid_stack'] = vm_utils._make_uuid_stack()
params['sr_path'] = vm_utils.get_sr_path(session)
params['torrent_seed_duration'] = CONF.xenserver.torrent_seed_duration
params['torrent_seed_chance'] = CONF.xenserver.torrent_seed_chance
params['torrent_max_last_accessed'] = \
CONF.xenserver.torrent_max_last_accessed
params['torrent_listen_port_start'] = \
CONF.xenserver.torrent_listen_port_start
params['torrent_listen_port_end'] = \
CONF.xenserver.torrent_listen_port_end
params['torrent_download_stall_cutoff'] = \
CONF.xenserver.torrent_download_stall_cutoff
params['torrent_max_seeder_processes_per_host'] = \
CONF.xenserver.torrent_max_seeder_processes_per_host
lookup_fn = self._lookup_torrent_url_fn()
params['torrent_url'] = lookup_fn(image_id)
vdis = session.call_plugin_serialized(
'bittorrent', 'download_vhd', **params)
return vdis
def upload_image(self, context, session, instance, image_id, vdi_uuids):
raise NotImplementedError
| apache-2.0 |
heynemann/generator-python-package | app/templates/_setup.py | 1 | 2004 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of <%= package.name %>.
# <%= package.url %>
# Licensed under the <%= package.license %> license:
# http://www.opensource.org/licenses/<%= package.license%>-license
# Copyright (c) <%= package.created.year %>, <%= package.author.name %> <<%= package.author.email %>>
from setuptools import setup, find_packages
from <%= package.pythonName %> import __version__
tests_require = [
'mock',
'nose',
'coverage',
'yanc',
'preggy',
'tox',
'ipdb',
'coveralls',
'sphinx',
]
setup(
name='<%= package.name %>',
version=__version__,
description='<%= package.description %>',
long_description='''
<%= package.description %>
''',
keywords='<%= package.keywords %>',
author='<%= package.author.name %>',
author_email='<%= package.author.email %>',
url='<%= package.url %>',
license='<%= package.license %>',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: <%= package.license %> License',
'Natural Language :: English',
'Operating System :: Unix',
<% for (var i=0; i< package.troves.length; i++) { %>'<%= package.troves[i] %>',
<% } %>'Operating System :: OS Independent',
],
packages=find_packages(),
include_package_data=<%= package.includePackageData ? "True" : "False" %>,
install_requires=[
# add your dependencies here
# remember to use 'package-name>=x.y.z,<x.(y+1).0' notation
# (this way you get bugfixes but no breaking changes)
<%= package.services.mongodb ? " 'pymongo',\n" : ""
%><%= package.services.redis ? " 'redis',\n" : ""
%> ],
extras_require={
'tests': tests_require,
},
entry_points={
'console_scripts': [
# add cli scripts here in this form:
# '<%= package.name %>=<%= package.pythonName %>.cli:main',
],
},
)
| mit |
turbomanage/training-data-analyst | courses/developingapps/python/pubsub-languageapi-spanner/end/quiz/api/api.py | 21 | 2607 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from flask import Response
"""
Import shared GCP helper modules
"""
# TODO: Add pubsub to import list
from quiz.gcp import datastore, pubsub
# END TODO
"""
Gets list of questions from datastore
- Create query
- Filter on quiz
- Call the datastore helper to get back JSON
- Pretty print JSON
- Set header and return the response
"""
def get_questions(quiz_name):
questions = datastore.list_entities(quiz_name)
payload = {'questions': list(questions)}
payload = json.dumps(payload, indent=2, sort_keys=True)
response = Response(payload)
response.headers['Content-Type'] = 'application/json'
return response
"""
Grades submitted answers
- Get list of questions with correct answers from datastore
- Iterate through questions, find any submitted answers that match
- Count total number of questions for which there is >0 correct answers
- Compose and pretty print payload
- Compose and return response
"""
def get_grade(quiz_name, answers):
questions = datastore.list_entities(quiz_name, False)
    # A question counts as correct if at least one submitted answer matches
    # its id and its correctAnswer value.
    score = len([
        q for q in questions
        if any(
            answer['id'] == q['id'] and
            int(answer['answer']) == q['correctAnswer']
            for answer in answers
        )
    ])
payload = {'correct': score, 'total': len(questions)}
payload = json.dumps(payload, indent=2, sort_keys=True)
response = Response(payload)
response.headers['Content-Type'] = 'application/json'
return response
"""
Publish feedback
- Call pubsub helper
- Compose and return response
"""
def publish_feedback(feedback):
# TODO: Publish the feedback using your pubsub module, return the result
result = pubsub.publish_feedback(feedback)
response = Response(json.dumps(result, indent=2, sort_keys=True))
response.headers['Content-Type'] = 'application/json'
return response
# END TODO
| apache-2.0 |
heladio/my-blog | pelica-env/lib/python2.7/site-packages/pip/wheel.py | 34 | 22339 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import functools
import hashlib
import logging
import os
import re
import shutil
import stat
import sys
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip.locations import distutils_scheme
from pip import pep425tags
from pip.utils import (call_subprocess, normalize_path, make_path_relative,
captured_stdout)
from pip.utils.logging import indent_log
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
def rehash(path, algo='sha256', blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
block = f.read(blocksize)
while block:
length += len(block)
h.update(block)
block = f.read(blocksize)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
return (digest, length)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = {'newline': ''}
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = configparser.RawConfigParser()
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
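# Hedged demo (not part of pip): show what get_entrypoints() parses out of a
# minimal entry_points.txt. The project and script names below are made up;
# real wheels ship this file inside their .dist-info directory.
if __name__ == "__main__":
    import tempfile
    _demo_path = os.path.join(tempfile.mkdtemp(), "entry_points.txt")
    with open(_demo_path, "w") as _fp:
        _fp.write("[console_scripts]\nmytool = mypkg.cli:main\n")
    # Expected result: ({'mytool': 'mypkg.cli:main'}, {})
    print(get_entrypoints(_demo_path))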
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True, scheme=None, isolated=False):
"""Install a wheel"""
if not scheme:
scheme = distutils_scheme(
name, user=user, home=home, root=root, isolated=isolated
)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
if not os.path.exists(dest): # common for the 'include' path
os.makedirs(dest)
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base and
s.endswith('.dist-info') and
# is self.req.project_name case preserving?
s.lower().startswith(
req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
if not os.path.exists(destdir):
os.makedirs(destdir)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
    # To add to the level of hack in this section of code: in order to support
    # ensurepip, this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which controls which versioned scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this is the behavior whenever ENSUREPIP_OPTIONS is set to any
    #     value other than altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(
maker.make_multiple(['%s = %s' % kv for kv in console.items()])
)
if len(gui) > 0:
generated.extend(
maker.make_multiple(
['%s = %s' % kv for kv in gui.items()],
{'gui': True}
)
)
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version that is only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warning(
'Installing from a newer Wheel-Version (%s)',
'.'.join(map(str, version)),
)
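# Hedged sketch (helper not part of pip): how wheel_version() and
# check_compatibility() are meant to be combined for an unpacked wheel.
# The directory and name arguments are hypothetical.
def _check_unpacked_wheel(source_dir, name):
    # wheel_version() returns e.g. (1, 0), or False if the WHEEL metadata
    # could not be read; check_compatibility() raises UnsupportedWheel for a
    # newer major series and only warns for a newer minor version.
    version = wheel_version(source_dir)
    check_compatibility(version, name)
    return version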
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE
)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename(
"%s is not a valid wheel filename." % filename
)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set(
(x, y, z) for x in self.pyversions
for y in self.abis for z in self.plats
)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
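# Hedged illustration (helper not part of pip): parse a wheel filename with
# the Wheel class above and summarize its tags. The default filename is made
# up but follows the naming scheme matched by wheel_file_re.
def _describe_wheel(filename="example_pkg-1.0-py2.py3-none-any.whl"):
    # For the default filename this returns
    # ('example-pkg', ['py2', 'py3'], <True or False on this interpreter>).
    w = Wheel(filename)
    return (w.name, w.pyversions, w.supported())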
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, wheel_dir, build_options=None,
global_options=None):
self.requirement_set = requirement_set
self.finder = finder
self.wheel_dir = normalize_path(wheel_dir)
self.build_options = build_options or []
self.global_options = global_options or []
def _build_one(self, req):
"""Build one wheel."""
base_args = [
sys.executable, '-c',
"import setuptools;__file__=%r;"
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), "
"__file__, 'exec'))" % req.setup_py
] + list(self.global_options)
logger.info('Running setup.py bdist_wheel for %s', req.name)
logger.info('Destination directory: %s', self.wheel_dir)
wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] \
+ self.build_options
try:
call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed building wheel for %s', req.name)
return False
def build(self):
"""Build wheels."""
        # unpack and construct the requirement set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = []
for req in reqset:
if req.is_wheel:
logger.info(
'Skipping %s, due to already being wheel.', req.name,
)
elif req.editable:
logger.info(
'Skipping %s, due to being editable', req.name,
)
else:
buildset.append(req)
if not buildset:
return True
# Build the wheels.
logger.info(
'Building wheels for collected packages: %s',
', '.join([req.name for req in buildset]),
)
with indent_log():
build_success, build_failure = [], []
for req in buildset:
if self._build_one(req):
build_success.append(req)
else:
build_failure.append(req)
# notify success/failure
if build_success:
logger.info(
'Successfully built %s',
' '.join([req.name for req in build_success]),
)
if build_failure:
logger.info(
'Failed to build %s',
' '.join([req.name for req in build_failure]),
)
# Return True if all builds were successful
return len(build_failure) == 0
| mit |
csgxy123/Dato-Core | src/unity/python/doc/source/conf.py | 13 | 9941 | # -*- coding: utf-8 -*-
#
# GraphLab Create documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 24 17:26:36 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Import graphlab and several submodules so that sphinx can find them.
# For example, it can now find graphlab.recommender.PopularityModel.
import graphlab
for m in [
'aws',
'boosted_trees_classifier',
'boosted_trees_regression',
'canvas',
'classifier',
'connected_components',
'data_matching',
'deeplearning',
'deploy',
'evaluation',
'graph_coloring',
'image_analysis',
'kcore',
'kmeans',
'linear_regression',
'logistic_classifier',
'load_model',
'model_parameter_search',
'nearest_neighbors',
'neuralnet_classifier',
'pagerank',
'recommender',
'regression',
'shortest_path',
'svm_classifier',
'text_analytics',
'topic_model',
'triangle_counting',
'vowpal_wabbit',
'extensions',
'distances'
]:
module_name = 'graphlab.' + m
sys.modules[module_name] = eval(module_name)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
print sys.path
#sys.path.insert(0, os.path.abspath('../venv/lib/python2.7/site-packages/'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'numpydoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax',
'sphinx.ext.inheritance_diagram', 'sphinx.ext.autosummary', 'sphinx_graphlab_ext.autorun']
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index' #'graphlab'
# General information about the project.
project = u'GraphLab Create API Documentation'
copyright = u'2014, Dato, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{{VERSION_STRING}}'
# The full version, including alpha/beta/rc tags.
release = '{{VERSION_STRING}}_beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['*demo*', '*test*', 'test_*', '*cython*']
numpydoc_show_class_members = False
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Customizations -------------------
autodoc_default_flags = [] #'members']
#'private-members',
#'special-members',
#'show-inheritance']
def autodoc_skip_member(app, what, name, obj, skip, options):
exclusions = ('__weakref__', # special-members
'__doc__', '__module__', '__dict__', # undoc-members
)
exclude = name in exclusions
return skip or exclude
def setup(app):
app.connect('autodoc-skip-member', autodoc_skip_member)
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
html_theme_options = {
#'theme_globaltoc_depth': 3
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {'**': ['sidebartoc.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GraphLabCreatedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'GraphLabCreate.tex', u'GraphLab Create Documentation',
u'Dato, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'graphlabcreate', u'GraphLab Create Documentation',
[u'Dato, Inc.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GraphLabCreate', u'GraphLab Create Documentation',
u'Dato, Inc.', 'GraphLabCreate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| agpl-3.0 |
fran-penedo/dreal | benchmarks/network/airplane/gen.py | 55 | 1248 |
flow_var = {}
flow_dec = {}
state_dec = {}
state_val = {}
cont_cond = {}
jump_cond = {}
def getHdr(n):
res = []
for i in range(n):
getHdr.counter += 1
res.append(getHdr.counter)
return res
getHdr.counter = 0
######################
# Formula generation #
######################
def print_loop(bound, steps, keys, holder):
c = 0
while True:
for j in range(steps):
hd = getHdr(holder)
for i in keys:
print(cont_cond[i][j].format(c,*hd).strip())
if c >= bound:
return
for i in keys:
print(jump_cond[i][j].format(c,c+1).strip())
c += 1
def generate(bound, steps, keys, holder, init, goal):
print("(set-logic QF_NRA_ODE)")
for i in keys:
print(flow_var[i].strip())
for i in keys:
print(flow_dec[i].strip())
for b in range(bound + 1):
for i in keys:
print(state_dec[i].format(b).strip())
for b in range(bound + 1):
for i in keys:
print(state_val[i].format(b).strip())
print(init.format(0).strip())
print_loop(bound, steps, keys, holder)
print(goal.format(bound).strip())
print("(check-sat)\n(exit)")
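# Hedged usage sketch (not part of the original benchmark generator): callers
# are expected to fill the template dictionaries at the top of this module
# with SMT2 text keyed by variable name and then call generate(). Everything
# below (the variable 'x', the template strings, bounds) is made up purely to
# show the expected shape of the data, not a real airplane model.
if __name__ == "__main__":
    k = 'x'
    flow_var[k] = "(declare-fun x () Real)"
    flow_dec[k] = "(define-ode flow_1 ((= d/dt[x] 1.0)))"
    state_dec[k] = "(declare-fun x_{0}_0 () Real)\n(declare-fun x_{0}_t () Real)"
    state_val[k] = "(assert (<= 0 x_{0}_0)) (assert (<= 0 x_{0}_t))"
    cont_cond[k] = ["(assert (= [x_{0}_t] (integral 0. time_{0} [x_{0}_0] flow_1)))"]
    jump_cond[k] = ["(assert (= x_{1}_0 x_{0}_t))"]
    generate(bound=2, steps=1, keys=[k], holder=0,
             init="(assert (= x_{0}_0 0))",
             goal="(assert (>= x_{0}_t 1))")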
| gpl-3.0 |
bckwltn/SickRage | tornado/stack_context.py | 248 | 13174 | #!/usr/bin/env python
#
# Copyright 2010 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""`StackContext` allows applications to maintain threadlocal-like state
that follows execution as it moves to other execution contexts.
The motivating examples are to eliminate the need for explicit
``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
allow some additional context to be kept for logging.
This is slightly magic, but it's an extension of the idea that an
exception handler is a kind of stack-local state and when that stack
is suspended and resumed in a new context that state needs to be
preserved. `StackContext` shifts the burden of restoring that state
from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
in ``async_callback``) to the mechanisms that transfer control from
one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
thread pools, etc).
Example usage::
@contextlib.contextmanager
def die_on_error():
try:
yield
except Exception:
logging.error("exception in asynchronous operation",exc_info=True)
sys.exit(1)
with StackContext(die_on_error):
# Any exception thrown here *or in callback and its descendants*
# will cause the process to exit instead of spinning endlessly
# in the ioloop.
http_client.fetch(url, callback)
ioloop.start()
Most applications shouldn't have to work with `StackContext` directly.
Here are a few rules of thumb for when it's necessary:
* If you're writing an asynchronous library that doesn't rely on a
stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
(for example, if you're writing a thread pool), use
`.stack_context.wrap()` before any asynchronous operations to capture the
stack context from where the operation was started.
* If you're writing an asynchronous library that has some shared
resources (such as a connection pool), create those shared resources
within a ``with stack_context.NullContext():`` block. This will prevent
``StackContexts`` from leaking from one request to another.
* If you want to write something like an exception handler that will
persist across asynchronous calls, create a new `StackContext` (or
`ExceptionStackContext`), and make your asynchronous calls in a ``with``
block that references your `StackContext`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
import threading
from tornado.util import raise_exc_info
class StackContextInconsistentError(Exception):
pass
class _State(threading.local):
def __init__(self):
self.contexts = (tuple(), None)
_state = _State()
class StackContext(object):
"""Establishes the given context as a StackContext that will be transferred.
Note that the parameter is a callable that returns a context
manager, not the context itself. That is, where for a
non-transferable context manager you would say::
with my_context():
StackContext takes the function itself rather than its result::
with StackContext(my_context):
The result of ``with StackContext() as cb:`` is a deactivation
callback. Run this callback when the StackContext is no longer
needed to ensure that it is not propagated any further (note that
deactivating a context does not affect any instances of that
context that are currently pending). This is an advanced feature
and not necessary in most applications.
"""
def __init__(self, context_factory):
self.context_factory = context_factory
self.contexts = []
self.active = True
def _deactivate(self):
self.active = False
# StackContext protocol
def enter(self):
context = self.context_factory()
self.contexts.append(context)
context.__enter__()
def exit(self, type, value, traceback):
context = self.contexts.pop()
context.__exit__(type, value, traceback)
# Note that some of this code is duplicated in ExceptionStackContext
# below. ExceptionStackContext is more common and doesn't need
# the full generality of this class.
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0] + (self,), self)
_state.contexts = self.new_contexts
try:
self.enter()
except:
_state.contexts = self.old_contexts
raise
return self._deactivate
def __exit__(self, type, value, traceback):
try:
self.exit(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
# Generator coroutines and with-statements with non-local
# effects interact badly. Check here for signs of
# the stack getting out of sync.
# Note that this check comes after restoring _state.context
# so that if it fails things are left in a (relatively)
# consistent state.
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
class ExceptionStackContext(object):
"""Specialization of StackContext for exception handling.
The supplied ``exception_handler`` function will be called in the
event of an uncaught exception in this context. The semantics are
similar to a try/finally clause, and intended use cases are to log
an error, close a socket, or similar cleanup actions. The
``exc_info`` triple ``(type, value, traceback)`` will be passed to the
exception_handler function.
If the exception handler returns true, the exception will be
consumed and will not be propagated to other exception handlers.
"""
def __init__(self, exception_handler):
self.exception_handler = exception_handler
self.active = True
def _deactivate(self):
self.active = False
def exit(self, type, value, traceback):
if type is not None:
return self.exception_handler(type, value, traceback)
def __enter__(self):
self.old_contexts = _state.contexts
self.new_contexts = (self.old_contexts[0], self)
_state.contexts = self.new_contexts
return self._deactivate
def __exit__(self, type, value, traceback):
try:
if type is not None:
return self.exception_handler(type, value, traceback)
finally:
final_contexts = _state.contexts
_state.contexts = self.old_contexts
if final_contexts is not self.new_contexts:
raise StackContextInconsistentError(
'stack_context inconsistency (may be caused by yield '
'within a "with StackContext" block)')
# Break up a reference to itself to allow for faster GC on CPython.
self.new_contexts = None
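# A hedged sketch (not part of tornado): a minimal handler with the signature
# ExceptionStackContext expects. It is defined only as an illustration and is
# not referenced by the module; a caller would use it as
#
#     with ExceptionStackContext(_example_exception_handler):
#         http_client.fetch(url, callback)
#
def _example_exception_handler(typ, value, tb):
    import logging
    logging.error("uncaught exception in asynchronous operation",
                  exc_info=(typ, value, tb))
    return True  # returning True consumes the exception; False propagates it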
class NullContext(object):
"""Resets the `StackContext`.
Useful when creating a shared resource on demand (e.g. an
`.AsyncHTTPClient`) where the stack that caused the creating is
not relevant to future operations.
"""
def __enter__(self):
self.old_contexts = _state.contexts
_state.contexts = (tuple(), None)
def __exit__(self, type, value, traceback):
_state.contexts = self.old_contexts
def _remove_deactivated(contexts):
"""Remove deactivated handlers from the chain"""
# Clean ctx handlers
stack_contexts = tuple([h for h in contexts[0] if h.active])
# Find new head
head = contexts[1]
while head is not None and not head.active:
head = head.old_contexts[1]
# Process chain
ctx = head
while ctx is not None:
parent = ctx.old_contexts[1]
while parent is not None:
if parent.active:
break
ctx.old_contexts = parent.old_contexts
parent = parent.old_contexts[1]
ctx = parent
return (stack_contexts, head)
def wrap(fn):
"""Returns a callable object that will restore the current `StackContext`
when executed.
Use this whenever saving a callback to be executed later in a
different execution context (either in a different thread or
asynchronously in the same thread).
"""
# Check if function is already wrapped
if fn is None or hasattr(fn, '_wrapped'):
return fn
# Capture current stack head
# TODO: Any other better way to store contexts and update them in wrapped function?
cap_contexts = [_state.contexts]
if not cap_contexts[0][0] and not cap_contexts[0][1]:
# Fast path when there are no active contexts.
def null_wrapper(*args, **kwargs):
try:
current_state = _state.contexts
_state.contexts = cap_contexts[0]
return fn(*args, **kwargs)
finally:
_state.contexts = current_state
null_wrapper._wrapped = True
return null_wrapper
def wrapped(*args, **kwargs):
ret = None
try:
# Capture old state
current_state = _state.contexts
# Remove deactivated items
cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
# Force new state
_state.contexts = contexts
# Current exception
exc = (None, None, None)
top = None
# Apply stack contexts
last_ctx = 0
stack = contexts[0]
# Apply state
for n in stack:
try:
n.enter()
last_ctx += 1
except:
# Exception happened. Record exception info and store top-most handler
exc = sys.exc_info()
top = n.old_contexts[1]
# Execute callback if no exception happened while restoring state
if top is None:
try:
ret = fn(*args, **kwargs)
except:
exc = sys.exc_info()
top = contexts[1]
# If there was exception, try to handle it by going through the exception chain
if top is not None:
exc = _handle_exception(top, exc)
else:
# Otherwise take shorter path and run stack contexts in reverse order
while last_ctx > 0:
last_ctx -= 1
c = stack[last_ctx]
try:
c.exit(*exc)
except:
exc = sys.exc_info()
top = c.old_contexts[1]
break
else:
top = None
            # If an exception happened while unrolling, take the longer exception handler path
if top is not None:
exc = _handle_exception(top, exc)
# If exception was not handled, raise it
if exc != (None, None, None):
raise_exc_info(exc)
finally:
_state.contexts = current_state
return ret
wrapped._wrapped = True
return wrapped
def _handle_exception(tail, exc):
while tail is not None:
try:
if tail.exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
tail = tail.old_contexts[1]
return exc
def run_with_stack_context(context, func):
"""Run a coroutine ``func`` in the given `StackContext`.
It is not safe to have a ``yield`` statement within a ``with StackContext``
block, so it is difficult to use stack context with `.gen.coroutine`.
This helper function runs the function in the correct context while
keeping the ``yield`` and ``with`` statements syntactically separate.
Example::
@gen.coroutine
def incorrect():
with StackContext(ctx):
# ERROR: this will raise StackContextInconsistentError
yield other_coroutine()
@gen.coroutine
def correct():
yield run_with_stack_context(StackContext(ctx), other_coroutine)
.. versionadded:: 3.1
"""
with context:
return func()
| gpl-3.0 |
ddelemeny/calligra | devtools/scripts/ircbuildbot.py | 6 | 2607 | #!/usr/bin/python -Qwarnall
# -*- coding: utf-8 -*-
# This is a simple irc bot that reports progress to the Calligra irc channel
import time, lxml.etree, urllib2, re, sys, socket, string
HOST='irc.freenode.org' #The server we want to connect to
PORT=6667 #The connection port which is usually 6667
NICK='buildbot_py' #The bot's nickname
IDENT='buildbot_py'
REALNAME='James Spawned'
OWNER='vandenoever' #The bot owner's nick
CHANNELINIT='#Calligra' #The default channel for the bot
readbuffer='' #Here we store all the messages from server
feed = "http://158.36.191.251:8080/guestAuth/feed.html?buildTypeId=bt6&itemsType=builds&buildStatus=failed&userKey=guest"
s = socket.socket( ) #Create the socket
s.connect((HOST, PORT)) #Connect to server
s.send('NICK '+NICK+'\n') #Send the nick to server
s.send('USER '+IDENT+' '+HOST+' bla :'+REALNAME+'\n') #Identify to server
def getMessage():
try:
parser = lxml.etree.XMLParser(dtd_validation=False, load_dtd=False, resolve_entities=False, no_network=False, recover=False)
tree = lxml.etree.parse(urllib2.urlopen(feed))
ns = {'a':'http://www.w3.org/2005/Atom'}
link = tree.xpath("/a:feed/a:entry[1]/a:link/@href", namespaces=ns)[0]
title = tree.xpath("/a:feed/a:entry[1]/a:title/text()", namespaces=ns)[0]
summary = tree.xpath("/a:feed/a:entry[1]/a:summary/text()", namespaces=ns)[0]
s = re.search('strong>([^<]*)<', summary).group(1)
newmessage = title + " " + s + " " + link
try:
who = re.search('by\s+(\S*)', summary).group(1)
newmessage = who + ": " + newmessage
except:
pass
except:
newmessage = "Error in reading RSS"
return newmessage
joined = False
message = ""
lastchecktime = time.time() - 55
while 1:
line = s.recv(500) #receive server messages
print line.rstrip() #server message is output
if not joined:
s.send('JOIN ' + CHANNELINIT + '\n') #Join a channel
s.send("PRIVMSG " + CHANNELINIT + " :Spawned, James Spawned\n")
joined = True
if line[:4] == "PING":
s.send("PONG\n")
if time.time() - lastchecktime > 60:
newmessage = getMessage()
if newmessage != message:
message = newmessage
s.send("PRIVMSG " + CHANNELINIT + " :" + message + "\n")
lastchecktime = time.time()
| gpl-2.0 |
yury-s/v8-inspector | Source/chrome/tools/perf/measurements/thread_times_unittest.py | 2 | 2450 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import wpr_modes
from telemetry import decorators
from telemetry.page import page
from telemetry.unittest_util import options_for_unittests
from telemetry.unittest_util import page_test_test_case
from measurements import thread_times
from metrics import timeline
class AnimatedPage(page.Page):
def __init__(self, page_set):
super(AnimatedPage, self).__init__(
url='file://animated_page.html',
page_set=page_set, base_dir=page_set.base_dir)
def RunPageInteractions(self, action_runner):
action_runner.Wait(.2)
class ThreadTimesUnitTest(page_test_test_case.PageTestTestCase):
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
@decorators.Disabled('android')
def testBasic(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
measurement = thread_times.ThreadTimes()
timeline_options = self._options
results = self.RunMeasurement(measurement, ps, options = timeline_options)
self.assertFalse(len(results.failures), results.failures)
for interval in timeline.IntervalNames:
for category in timeline.TimelineThreadCategories.values():
cpu_time_name = timeline.ThreadCpuTimeResultName(category, interval)
cpu_time = results.FindAllPageSpecificValuesNamed(cpu_time_name)
self.assertEquals(len(cpu_time), 1)
@decorators.Disabled('chromeos') # crbug.com/483212
def testWithSilkDetails(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
measurement = thread_times.ThreadTimes(report_silk_details=True)
results = self.RunMeasurement(measurement, ps, options = self._options)
self.assertEquals(0, len(results.failures))
main_thread = "renderer_main"
expected_trace_categories = ["blink", "cc", "idle"]
for interval in timeline.IntervalNames:
for trace_category in expected_trace_categories:
value_name = timeline.ThreadDetailResultName(
main_thread, interval, trace_category)
values = results.FindAllPageSpecificValuesNamed(value_name)
self.assertEquals(len(values), 1)
def testCleanUpTrace(self):
self.TestTracingCleanedUp(thread_times.ThreadTimes, self._options)
| bsd-3-clause |
FlorentChamault/My_sickbeard | lib/hachoir_parser/program/elf.py | 90 | 7120 | """
ELF (Unix/BSD executable file format) parser.
Author: Victor Stinner
Creation date: 08 may 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
String, Bytes)
from lib.hachoir_core.text_handler import textHandler, hexadecimal
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class ElfHeader(FieldSet):
static_size = 52*8
LITTLE_ENDIAN_ID = 1
BIG_ENDIAN_ID = 2
MACHINE_NAME = {
1: u"AT&T WE 32100",
2: u"SPARC",
3: u"Intel 80386",
4: u"Motorola 68000",
5: u"Motorola 88000",
7: u"Intel 80860",
8: u"MIPS RS3000"
}
CLASS_NAME = {
1: u"32 bits",
2: u"64 bits"
}
TYPE_NAME = {
0: u"No file type",
1: u"Relocatable file",
2: u"Executable file",
3: u"Shared object file",
4: u"Core file",
0xFF00: u"Processor-specific (0xFF00)",
0xFFFF: u"Processor-specific (0xFFFF)"
}
ENDIAN_NAME = {
LITTLE_ENDIAN_ID: "Little endian",
BIG_ENDIAN_ID: "Big endian",
}
def createFields(self):
yield Bytes(self, "signature", 4, r'ELF signature ("\x7fELF")')
yield Enum(UInt8(self, "class", "Class"), self.CLASS_NAME)
yield Enum(UInt8(self, "endian", "Endian"), self.ENDIAN_NAME)
yield UInt8(self, "file_version", "File version")
yield String(self, "pad", 8, "Pad")
yield UInt8(self, "nb_ident", "Size of ident[]")
yield Enum(UInt16(self, "type", "File type"), self.TYPE_NAME)
yield Enum(UInt16(self, "machine", "Machine type"), self.MACHINE_NAME)
yield UInt32(self, "version", "ELF format version")
        yield UInt32(self, "entry", "Entry point address")
yield UInt32(self, "phoff", "Program header offset")
yield UInt32(self, "shoff", "Section header offset")
yield UInt32(self, "flags", "Flags")
yield UInt16(self, "ehsize", "Elf header size (this header)")
yield UInt16(self, "phentsize", "Program header entry size")
yield UInt16(self, "phnum", "Program header entry count")
yield UInt16(self, "shentsize", "Section header entry size")
        yield UInt16(self, "shnum", "Section header entry count")
yield UInt16(self, "shstrndx", "Section header strtab index")
def isValid(self):
if self["signature"].value != "\x7FELF":
return "Wrong ELF signature"
if self["class"].value not in self.CLASS_NAME:
return "Unknown class"
if self["endian"].value not in self.ENDIAN_NAME:
return "Unknown endian (%s)" % self["endian"].value
return ""
class SectionHeader32(FieldSet):
static_size = 40*8
TYPE_NAME = {
8: "BSS"
}
def createFields(self):
yield UInt32(self, "name", "Name")
yield Enum(UInt32(self, "type", "Type"), self.TYPE_NAME)
yield UInt32(self, "flags", "Flags")
yield textHandler(UInt32(self, "VMA", "Virtual memory address"), hexadecimal)
yield textHandler(UInt32(self, "LMA", "Logical memory address (in file)"), hexadecimal)
yield textHandler(UInt32(self, "size", "Size"), hexadecimal)
yield UInt32(self, "link", "Link")
yield UInt32(self, "info", "Information")
yield UInt32(self, "addr_align", "Address alignment")
yield UInt32(self, "entry_size", "Entry size")
def createDescription(self):
return "Section header (name: %s, type: %s)" % \
(self["name"].value, self["type"].display)
class ProgramHeader32(FieldSet):
TYPE_NAME = {
3: "Dynamic library"
}
static_size = 32*8
def createFields(self):
yield Enum(UInt16(self, "type", "Type"), ProgramHeader32.TYPE_NAME)
yield UInt16(self, "flags", "Flags")
yield UInt32(self, "offset", "Offset")
yield textHandler(UInt32(self, "vaddr", "V. address"), hexadecimal)
yield textHandler(UInt32(self, "paddr", "P. address"), hexadecimal)
yield UInt32(self, "file_size", "File size")
yield UInt32(self, "mem_size", "Memory size")
yield UInt32(self, "align", "Alignment")
yield UInt32(self, "xxx", "???")
def createDescription(self):
return "Program Header (%s)" % self["type"].display
def sortSection(a, b):
return int(a["offset"] - b["offset"])
#class Sections(FieldSet):
# def createFields?(self, stream, parent, sections):
# for section in sections:
# ofs = section["offset"]
# size = section["file_size"]
# if size != 0:
# sub = stream.createSub(ofs, size)
# #yield DeflateFilter(self, "section[]", sub, size, Section, "Section"))
# chunk = self.doRead("section[]", "Section", (Section,), {"stream": sub})
# else:
# chunk = self.doRead("section[]", "Section", (FormatChunk, "string[0]"))
# chunk.description = "ELF section (in file: %s..%s)" % (ofs, ofs+size)
class ElfFile(Parser):
PARSER_TAGS = {
"id": "elf",
"category": "program",
"file_ext": ("so", ""),
"min_size": ElfHeader.static_size, # At least one program header
"mime": (
u"application/x-executable",
u"application/x-object",
u"application/x-sharedlib",
u"application/x-executable-file",
u"application/x-coredump"),
"magic": (("\x7FELF", 0),),
"description": "ELF Unix/BSD program/library"
}
endian = LITTLE_ENDIAN
def validate(self):
err = self["header"].isValid()
if err:
return err
return True
def createFields(self):
# Choose the right endian depending on endian specified in header
if self.stream.readBits(5*8, 8, BIG_ENDIAN) == ElfHeader.BIG_ENDIAN_ID:
self.endian = BIG_ENDIAN
else:
self.endian = LITTLE_ENDIAN
# Parse header and program headers
yield ElfHeader(self, "header", "Header")
for index in xrange(self["header/phnum"].value):
yield ProgramHeader32(self, "prg_header[]")
if False:
raise ParserError("TODO: Parse sections...")
#sections = self.array("prg_header")
#size = self["header/shoff"].value - self.current_size//8
#chunk = self.doRead("data", "Data", (DeflateFilter, stream, size, Sections, sections))
#chunk.description = "Sections (use an evil hack to manage share same data on differents parts)"
#assert self.current_size//8 == self["header/shoff"].value
else:
raw = self.seekByte(self["header/shoff"].value, "raw[]", relative=False)
if raw:
yield raw
for index in xrange(self["header/shnum"].value):
yield SectionHeader32(self, "section_header[]")
def createDescription(self):
return "ELF Unix/BSD program/library: %s" % (
self["header/class"].display)
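# Hedged sketch (not part of hachoir): once a parser instance for an ELF file
# exists (obtained through hachoir's usual stream/parser entry points, not
# shown here), its fields are read by path exactly as createDescription()
# above does.
def describe_elf(parser):
    # "header/class", "header/machine" and "header/type" are Enum fields
    # declared in ElfHeader; .display yields the human-readable enum name.
    return u"%s, %s, %s" % (parser["header/class"].display,
                            parser["header/machine"].display,
                            parser["header/type"].display)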
| gpl-3.0 |
grupoprog3/proyecto_final | proyecto/flask/Lib/site-packages/sqlalchemy/orm/events.py | 17 | 84907 | # orm/events.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""ORM event interfaces.
"""
from .. import event, exc, util
from .base import _mapper_or_none
import inspect
import weakref
from . import interfaces
from . import mapperlib, instrumentation
from .session import Session, sessionmaker
from .scoping import scoped_session
from .attributes import QueryableAttribute
from .query import Query
from sqlalchemy.util.compat import inspect_getargspec
class InstrumentationEvents(event.Events):
"""Events related to class instrumentation events.
The listeners here support being established against
any new style class, that is any object that is a subclass
of 'type'. Events will then be fired off for events
against that class. If the "propagate=True" flag is passed
to event.listen(), the event will fire off for subclasses
of that class as well.
The Python ``type`` builtin is also accepted as a target,
which when used has the effect of events being emitted
for all classes.
Note the "propagate" flag here is defaulted to ``True``,
unlike the other class level events where it defaults
to ``False``. This means that new subclasses will also
be the subject of these events, when a listener
is established on a superclass.
.. versionchanged:: 0.8 - events here will emit based
on comparing the incoming class to the type of class
passed to :func:`.event.listen`. Previously, the
event would fire for any class unconditionally regardless
of what class was sent for listening, despite
documentation which stated the contrary.
"""
_target_class_doc = "SomeBaseClass"
_dispatch_target = instrumentation.InstrumentationFactory
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
return _InstrumentationEventsHold(target)
else:
return None
@classmethod
def _listen(cls, event_key, propagate=True, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
def listen(target_cls, *arg):
listen_cls = target()
if propagate and issubclass(target_cls, listen_cls):
return fn(target_cls, *arg)
elif not propagate and target_cls is listen_cls:
return fn(target_cls, *arg)
def remove(ref):
key = event.registry._EventKey(
None, identifier, listen,
instrumentation._instrumentation_factory)
getattr(instrumentation._instrumentation_factory.dispatch,
identifier).remove(key)
target = weakref.ref(target.class_, remove)
event_key.\
with_dispatch_target(instrumentation._instrumentation_factory).\
with_wrapper(listen).base_listen(**kw)
@classmethod
def _clear(cls):
super(InstrumentationEvents, cls)._clear()
instrumentation._instrumentation_factory.dispatch._clear()
def class_instrument(self, cls):
"""Called after the given class is instrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def class_uninstrument(self, cls):
"""Called before the given class is uninstrumented.
To get at the :class:`.ClassManager`, use
:func:`.manager_of_class`.
"""
def attribute_instrument(self, cls, key, inst):
"""Called when an attribute is instrumented."""
class _InstrumentationEventsHold(object):
"""temporary marker object used to transfer from _accept_with() to
_listen() on the InstrumentationEvents class.
"""
def __init__(self, class_):
self.class_ = class_
dispatch = event.dispatcher(InstrumentationEvents)
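# A hedged illustration (not part of SQLAlchemy): a listener with the
# signature expected by the "class_instrument" event above. It is defined
# only as a sketch and is never registered; a caller would attach it with
# event.listen(SomeBaseClass, 'class_instrument', _example_class_instrument),
# optionally passing propagate=True to cover subclasses as well.
def _example_class_instrument(cls):
    # manager_of_class() returns the ClassManager that instrumentation
    # created for ``cls``, or None if the class is not instrumented.
    return instrumentation.manager_of_class(cls) is not None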
class InstanceEvents(event.Events):
"""Define events specific to object lifecycle.
e.g.::
from sqlalchemy import event
def my_load_listener(target, context):
print "on load!"
event.listen(SomeClass, 'load', my_load_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 instance events can be associated with
unmapped superclasses of mapped classes.
Instance events are closely related to mapper events, but
are more specific to the instance and its instrumentation,
rather than its system of persistence.
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting classes as well as the
class which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
"""
_target_class_doc = "SomeClass"
_dispatch_target = instrumentation.ClassManager
@classmethod
def _new_classmanager_instance(cls, class_, classmanager):
_InstanceEventsHold.populate(class_, classmanager)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if isinstance(target, instrumentation.ClassManager):
return target
elif isinstance(target, mapperlib.Mapper):
return target.class_manager
elif target is orm.mapper:
return instrumentation.ClassManager
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return instrumentation.ClassManager
else:
manager = instrumentation.manager_of_class(target)
if manager:
return manager
else:
return _InstanceEventsHold(target)
return None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if not raw:
def wrap(state, *arg, **kw):
return fn(state.obj(), *arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate, **kw)
if propagate:
for mgr in target.subclass_managers(True):
event_key.with_dispatch_target(mgr).base_listen(
propagate=True)
@classmethod
def _clear(cls):
super(InstanceEvents, cls)._clear()
_InstanceEventsHold._clear()
def first_init(self, manager, cls):
"""Called when the first instance of a particular mapping is called.
This event is called when the ``__init__`` method of a class
is called the first time for that particular class. The event
invokes before ``__init__`` actually proceeds as well as before
the :meth:`.InstanceEvents.init` event is invoked.
"""
def init(self, target, args, kwargs):
"""Receive an instance when its constructor is called.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is
loaded from the database; see the :meth:`.InstanceEvents.load`
event in order to intercept a database load.
The event is called before the actual ``__init__`` constructor
of the object is called. The ``kwargs`` dictionary may be
modified in-place in order to affect what is passed to
``__init__``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments passed to the ``__init__`` method.
This is passed as a tuple and is currently immutable.
:param kwargs: keyword arguments passed to the ``__init__`` method.
This structure *can* be altered in place.
.. seealso::
:meth:`.InstanceEvents.init_failure`
:meth:`.InstanceEvents.load`
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when its constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object, in conjunction with the object's constructor, e.g.
its ``__init__`` method. It is not called when an object is loaded
from the database.
The event is invoked after an exception raised by the ``__init__``
method is caught. After the event
is invoked, the original exception is re-raised outwards, so that
the construction of the object still raises an exception. The
actual exception and stack trace raised should be present in
``sys.exc_info()``.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param args: positional arguments that were passed to the ``__init__``
method.
:param kwargs: keyword arguments that were passed to the ``__init__``
method.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.load`
"""
def load(self, target, context):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress. This argument may be
``None`` if the load does not correspond to a :class:`.Query`,
such as during :meth:`.Session.merge`.
.. seealso::
:meth:`.InstanceEvents.init`
:meth:`.InstanceEvents.refresh`
:meth:`.SessionEvents.loaded_as_persistent`
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed from a query.
Contrast this to the :meth:`.InstanceEvents.load` method, which
is invoked when the object is first loaded from a query.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress.
:param attrs: sequence of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
.. seealso::
:meth:`.InstanceEvents.load`
"""
def refresh_flush(self, target, flush_context, attrs):
"""Receive an object instance after one or more attributes have
been refreshed within the persistence of the object.
This event is the same as :meth:`.InstanceEvents.refresh` except
it is invoked within the unit of work flush process, and the values
here typically come from the process of handling an INSERT or
UPDATE, such as via the RETURNING clause or from Python-side default
values.
.. versionadded:: 1.0.5
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param attrs: sequence of attribute names which
were populated.
"""
def expire(self, target, attrs):
"""Receive an object instance after its attributes or some subset
have been expired.
The ``attrs`` collection is a sequence of attribute names; if ``None``,
the entire state of the instance was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: sequence of attribute
names which were expired, or None if all attributes were
expired.
"""
def pickle(self, target, state_dict):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after its associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class _EventsHold(event.RefCollection):
"""Hold onto listeners against unmapped, uninstrumented classes.
Establish _listen() for that class' mapper/instrumentation when
those objects are created for that class.
"""
def __init__(self, class_):
self.class_ = class_
@classmethod
def _clear(cls):
cls.all_holds.clear()
class HoldEvents(object):
_dispatch_target = None
@classmethod
def _listen(cls, event_key, raw=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if target.class_ in target.all_holds:
collection = target.all_holds[target.class_]
else:
collection = target.all_holds[target.class_] = {}
event.registry._stored_in_collection(event_key, target)
collection[event_key._key] = (event_key, raw, propagate)
if propagate:
stack = list(target.class_.__subclasses__())
while stack:
subclass = stack.pop(0)
stack.extend(subclass.__subclasses__())
subject = target.resolve(subclass)
if subject is not None:
# we are already going through __subclasses__()
# so leave generic propagate flag False
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False, **kw)
def remove(self, event_key):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
if isinstance(target, _EventsHold):
collection = target.all_holds[target.class_]
del collection[event_key._key]
@classmethod
def populate(cls, class_, subject):
for subclass in class_.__mro__:
if subclass in cls.all_holds:
collection = cls.all_holds[subclass]
for event_key, raw, propagate in collection.values():
if propagate or subclass is class_:
# since we can't be sure in what order different
# classes in a hierarchy are triggered with
# populate(), we rely upon _EventsHold for all event
# assignment, instead of using the generic propagate
# flag.
event_key.with_dispatch_target(subject).\
listen(raw=raw, propagate=False)
class _InstanceEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return instrumentation.manager_of_class(class_)
class HoldInstanceEvents(_EventsHold.HoldEvents, InstanceEvents):
pass
dispatch = event.dispatcher(HoldInstanceEvents)
class MapperEvents(event.Events):
"""Define events specific to mappings.
e.g.::
from sqlalchemy import event
def my_before_insert_listener(mapper, connection, target):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeClass,
# to execute during the "before_insert" hook
event.listen(
SomeClass, 'before_insert', my_before_insert_listener)
Available targets include:
* mapped classes
* unmapped superclasses of mapped or to-be-mapped classes
(using the ``propagate=True`` flag)
* :class:`.Mapper` objects
* the :class:`.Mapper` class itself and the :func:`.mapper`
function indicate listening for all mappers.
.. versionchanged:: 0.8.0 mapper events can be associated with
unmapped superclasses of mapped classes.
Mapper events provide hooks into critical sections of the
mapper, including those related to object instrumentation,
object loading, and object persistence. In particular, the
persistence methods :meth:`~.MapperEvents.before_insert`,
and :meth:`~.MapperEvents.before_update` are popular
places to augment the state being persisted - however, these
methods operate with several significant restrictions. The
user is encouraged to evaluate the
:meth:`.SessionEvents.before_flush` and
:meth:`.SessionEvents.after_flush` methods as more
flexible and user-friendly hooks in which to apply
additional database state during a flush.
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers and/or the mappers of
inheriting classes, as well as any
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
* ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event
processing normally.
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners.
"""
_target_class_doc = "SomeClass"
_dispatch_target = mapperlib.Mapper
@classmethod
def _new_mapper_instance(cls, class_, mapper):
_MapperEventsHold.populate(class_, mapper)
@classmethod
@util.dependencies("sqlalchemy.orm")
def _accept_with(cls, orm, target):
if target is orm.mapper:
return mapperlib.Mapper
elif isinstance(target, type):
if issubclass(target, mapperlib.Mapper):
return target
else:
mapper = _mapper_or_none(target)
if mapper is not None:
return mapper
else:
return _MapperEventsHold(target)
else:
return target
@classmethod
def _listen(
cls, event_key, raw=False, retval=False, propagate=False, **kw):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if identifier in ("before_configured", "after_configured") and \
target is not mapperlib.Mapper:
util.warn(
"'before_configured' and 'after_configured' ORM events "
"only invoke with the mapper() function or Mapper class "
"as the target.")
if not raw or not retval:
if not raw:
meth = getattr(cls, identifier)
try:
target_index = \
inspect_getargspec(meth)[0].index('target') - 1
except ValueError:
target_index = None
def wrap(*arg, **kw):
if not raw and target_index is not None:
arg = list(arg)
arg[target_index] = arg[target_index].obj()
if not retval:
fn(*arg, **kw)
return interfaces.EXT_CONTINUE
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
if propagate:
for mapper in target.self_and_descendants:
event_key.with_dispatch_target(mapper).base_listen(
propagate=True, **kw)
else:
event_key.base_listen(**kw)
@classmethod
def _clear(cls):
super(MapperEvents, cls)._clear()
_MapperEventsHold._clear()
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
Most attributes of the mapper are not yet initialized.
This listener can either be applied to the :class:`.Mapper`
class overall, or to any un-mapped class which serves as a base
for classes that will be mapped (using the ``propagate=True`` flag)::
Base = declarative_base()
@event.listens_for(Base, "instrument_class", propagate=True)
def on_new_class(mapper, cls_):
" ... "
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
"""
def mapper_configured(self, mapper, class_):
"""Called when a specific mapper has completed its own configuration
within the scope of the :func:`.configure_mappers` call.
The :meth:`.MapperEvents.mapper_configured` event is invoked
for each mapper that is encountered when the
:func:`.orm.configure_mappers` function proceeds through the current
list of not-yet-configured mappers.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
When the event is called, the mapper should be in its final
state, but **not including backrefs** that may be invoked from
other mappers; they might still be pending within the
configuration operation. Bidirectional relationships that
are instead configured via the
:paramref:`.orm.relationship.back_populates` argument
*will* be fully available, since this style of relationship does not
rely upon other possibly-not-configured mappers to know that they
exist.
For an event that is guaranteed to have **all** mappers ready
to go including backrefs that are defined only on other
mappings, use the :meth:`.MapperEvents.after_configured`
event; this event invokes only after all known mappings have been
fully configured.
The :meth:`.MapperEvents.mapper_configured` event, unlike
:meth:`.MapperEvents.before_configured` or
:meth:`.MapperEvents.after_configured`,
is called for each mapper/class individually, and the mapper is
passed to the event itself. It also is called exactly once for
a particular mapper. The event is therefore useful for
configurational steps that benefit from being invoked just once
on a specific mapper basis, which don't require that "backref"
configurations are necessarily ready yet.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param class\_: the mapped class.
.. seealso::
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
"""
# TODO: need coverage for this event
def before_configured(self):
"""Called before a series of mappers have been configured.
The :meth:`.MapperEvents.before_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, before the function has done any of its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured")
def go():
# ...
Contrast this event to :meth:`.MapperEvents.after_configured`,
which is invoked after the series of mappers has been configured,
as well as :meth:`.MapperEvents.mapper_configured`, which is invoked
on a per-mapper basis as each one is configured to the extent possible.
Theoretically this event is called once per
application, but is actually called any time new mappers
are to be affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "before_configured", once=True)
def go():
# ...
.. versionadded:: 0.9.3
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self):
"""Called after a series of mappers have been configured.
The :meth:`.MapperEvents.after_configured` event is invoked
each time the :func:`.orm.configure_mappers` function is
invoked, after the function has completed its work.
:func:`.orm.configure_mappers` is typically invoked
automatically as mappings are first used, as well as each time
new mappers have been made available and new mapper use is
detected.
Contrast this event to the :meth:`.MapperEvents.mapper_configured`
event, which is called on a per-mapper basis while the configuration
operation proceeds; unlike that event, when this event is invoked,
all cross-configurations (e.g. backrefs) will also have been made
available for any mappers that were pending.
Also contrast to :meth:`.MapperEvents.before_configured`,
which is invoked before the series of mappers has been configured.
This event can **only** be applied to the :class:`.Mapper` class
or :func:`.mapper` function, and not to individual mappings or
mapped classes. It is only invoked for all mappings as a whole::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured")
def go():
# ...
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers`
call. If new mappings are constructed after existing ones have
already been used, this event will likely be called again. To ensure
that a particular event is only called once and no further, the
``once=True`` argument (new in 0.9.4) can be applied::
from sqlalchemy.orm import mapper
@event.listens_for(mapper, "after_configured", once=True)
def go():
# ...
.. seealso::
:meth:`.MapperEvents.mapper_configured`
:meth:`.MapperEvents.before_configured`
"""
def before_insert(self, mapper, connection, target):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class before their INSERT statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_insert(self, mapper, connection, target):
"""Receive an object instance after an INSERT statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
same class after their INSERT statements have been
emitted at once in a previous step. In the extremely
rare case that this is not desirable, the
:func:`.mapper` can be configured with ``batch=False``,
which will cause batches of instances to be broken up
into individual (and more poorly performing)
event->persist->event steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_update(self, mapper, connection, target):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.before_update` is
*not* a guarantee that an UPDATE statement will be
issued, although you can affect the outcome here by
modifying attributes so that a net change in value does
exist.
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class before their UPDATE statements are emitted at
once in a later step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_update(self, mapper, connection, target):
"""Receive an object instance after an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
collections are modified. If, at update time, no
column-based attributes have any net changes, no UPDATE
statement will be issued. This means that an instance
being sent to :meth:`~.MapperEvents.after_update` is
*not* a guarantee that an UPDATE statement has been
issued.
To detect if the column-based attributes on the object have net
changes, and therefore resulted in an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
The event is often called for a batch of objects of the
same class after their UPDATE statements have been emitted at
once in a previous step. In the extremely rare case that
this is not desirable, the :func:`.mapper` can be
configured with ``batch=False``, which will cause
batches of instances to be broken up into individual
(and more poorly performing) event->persist->event
steps.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def before_delete(self, mapper, connection, target):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
def after_delete(self, mapper, connection, target):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
.. warning::
Mapper-level flush events only allow **very limited operations**,
on attributes local to the row being operated upon only,
as well as allowing any SQL to be emitted on the given
:class:`.Connection`. **Please read fully** the notes
at :ref:`session_persistence_mapper` for guidelines on using
these methods; generally, the :meth:`.SessionEvents.before_flush`
method should be preferred for general on-flush changes.
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
.. seealso::
:ref:`session_persistence_events`
"""
class _MapperEventsHold(_EventsHold):
all_holds = weakref.WeakKeyDictionary()
def resolve(self, class_):
return _mapper_or_none(class_)
class HoldMapperEvents(_EventsHold.HoldEvents, MapperEvents):
pass
dispatch = event.dispatcher(HoldMapperEvents)
class SessionEvents(event.Events):
"""Define events specific to :class:`.Session` lifecycle.
e.g.::
from sqlalchemy import event
from sqlalchemy.orm import sessionmaker
def my_before_commit(session):
print "before commit!"
Session = sessionmaker()
event.listen(Session, "before_commit", my_before_commit)
The :func:`~.event.listen` function will accept
:class:`.Session` objects as well as the return result
of :class:`~.sessionmaker()` and :class:`~.scoped_session()`.
Additionally, it accepts the :class:`.Session` class which
will apply listeners to all :class:`.Session` instances
globally.
"""
_target_class_doc = "SomeSessionOrFactory"
_dispatch_target = Session
@classmethod
def _accept_with(cls, target):
if isinstance(target, scoped_session):
target = target.session_factory
if not isinstance(target, sessionmaker) and \
(
not isinstance(target, type) or
not issubclass(target, Session)
):
raise exc.ArgumentError(
"Session event listen on a scoped_session "
"requires that its creation callable "
"is associated with the Session class.")
if isinstance(target, sessionmaker):
return target.class_
elif isinstance(target, type):
if issubclass(target, scoped_session):
return Session
elif issubclass(target, Session):
return target
elif isinstance(target, Session):
return target
else:
return None
def after_transaction_create(self, session, transaction):
"""Execute when a new :class:`.SessionTransaction` is created.
This event differs from :meth:`~.SessionEvents.after_begin`
in that it occurs for each :class:`.SessionTransaction`
overall, as opposed to when transactions are begun
on individual database connections. It is also invoked
for nested transactions and subtransactions, and is always
matched by a corresponding
:meth:`~.SessionEvents.after_transaction_end` event
(assuming normal operation of the :class:`.Session`).
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_create")
def after_transaction_create(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_transaction_end(self, session, transaction):
"""Execute when the span of a :class:`.SessionTransaction` ends.
This event differs from :meth:`~.SessionEvents.after_commit`
in that it corresponds to all :class:`.SessionTransaction`
objects in use, including those for nested transactions
and subtransactions, and is always matched by a corresponding
:meth:`~.SessionEvents.after_transaction_create` event.
:param session: the target :class:`.Session`.
:param transaction: the target :class:`.SessionTransaction`.
To detect if this is the outermost
:class:`.SessionTransaction`, as opposed to a "subtransaction" or a
SAVEPOINT, test that the :attr:`.SessionTransaction.parent` attribute
is ``None``::
@event.listens_for(session, "after_transaction_end")
def after_transaction_end(session, transaction):
if transaction.parent is None:
# work with top-level transaction
To detect if the :class:`.SessionTransaction` is a SAVEPOINT, use the
:attr:`.SessionTransaction.nested` attribute::
@event.listens_for(session, "after_transaction_end")
def after_transaction_end(session, transaction):
if transaction.nested:
# work with SAVEPOINT transaction
.. seealso::
:class:`.SessionTransaction`
:meth:`~.SessionEvents.after_transaction_create`
"""
def before_commit(self, session):
"""Execute before commit is called.
.. note::
The :meth:`~.SessionEvents.before_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_commit(self, session):
"""Execute after a commit has occurred.
.. note::
The :meth:`~.SessionEvents.after_commit` hook is *not* per-flush,
that is, the :class:`.Session` can emit SQL to the database
many times within the scope of a transaction.
For interception of these events, use the
:meth:`~.SessionEvents.before_flush`,
:meth:`~.SessionEvents.after_flush`, or
:meth:`~.SessionEvents.after_flush_postexec`
events.
.. note::
The :class:`.Session` is not in an active transaction
when the :meth:`~.SessionEvents.after_commit` event is invoked,
and therefore can not emit SQL. To emit SQL corresponding to
every transaction, use the :meth:`~.SessionEvents.before_commit`
event.
:param session: The target :class:`.Session`.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_begin`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
use the :meth:`.SessionEvents.after_soft_rollback` event, checking the
:attr:`.Session.is_active` flag.
:param session: The target :class:`.Session`.
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction`
transactional marker object which was just closed. The current
:class:`.SessionTransaction` for the given :class:`.Session` is
available via the :attr:`.Session.transaction` attribute.
.. versionadded:: 0.7.3
"""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
:param instances: Usually ``None``, this is the collection of
objects which can be passed to the :meth:`.Session.flush` method
(note this usage is deprecated).
.. seealso::
:meth:`~.SessionEvents.after_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush_postexec`
:ref:`session_persistence_events`
"""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
.. seealso::
:meth:`~.SessionEvents.before_flush`
:meth:`~.SessionEvents.after_flush`
:ref:`session_persistence_events`
"""
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`~.engine.Connection` object
which will be used for SQL statements.
.. seealso::
:meth:`~.SessionEvents.before_commit`
:meth:`~.SessionEvents.after_commit`
:meth:`~.SessionEvents.after_transaction_create`
:meth:`~.SessionEvents.after_transaction_end`
"""
def before_attach(self, session, instance):
"""Execute before an instance is attached to a session.
This is called before an add, delete or merge causes
the object to be part of the session.
.. versionadded:: 0.8. Note that :meth:`~.SessionEvents.after_attach`
now fires off after the item is part of the session.
:meth:`.before_attach` is provided for those cases where
the item should not yet be part of the session state.
.. seealso::
:meth:`~.SessionEvents.after_attach`
:ref:`session_lifecycle_events`
"""
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge.
.. note::
As of 0.8, this event fires off *after* the item
has been fully associated with the session, which is
different than previous releases. For event
handlers that require the object not yet
be part of session state (such as handlers which
may autoflush while the target object is not
yet complete) consider the
new :meth:`.before_attach` event.
.. seealso::
:meth:`~.SessionEvents.before_attach`
:ref:`session_lifecycle_events`
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda update_context: (
update_context.session,
update_context.query,
update_context.context,
update_context.result))
def after_bulk_update(self, update_context):
"""Execute after a bulk update operation to the session.
This is called as a result of the :meth:`.Query.update` method.
:param update_context: an "update context" object which contains
details about the update, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` - the :class:`.Query` object that this update operation
was called upon.
* ``context`` - the :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` - the :class:`.ResultProxy` returned as a result of the
bulk UPDATE operation.
"""
@event._legacy_signature("0.9",
["session", "query", "query_context", "result"],
lambda delete_context: (
delete_context.session,
delete_context.query,
delete_context.context,
delete_context.result))
def after_bulk_delete(self, delete_context):
"""Execute after a bulk delete operation to the session.
This is called as a result of the :meth:`.Query.delete` method.
:param delete_context: a "delete context" object which contains
details about the delete, including these attributes:
* ``session`` - the :class:`.Session` involved
* ``query`` - the :class:`.Query` object that this delete operation
was called upon.
* ``context`` - the :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
* ``result`` - the :class:`.ResultProxy` returned as a result of the
bulk DELETE operation.
"""
def transient_to_pending(self, session, instance):
"""Intercept the "transient to pending" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_transient(self, session, instance):
"""Intercept the "pending to transient" transition for a specific object.
This less common transition occurs when a pending object that has
not been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction,
or when the :meth:`.Session.expunge` method is used.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_transient(self, session, instance):
"""Intercept the "persistent to transient" transition for a specific object.
This less common transition occurs when a pending object that
has been flushed is evicted from the session; this can occur
when the :meth:`.Session.rollback` method rolls back the transaction.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def pending_to_persistent(self, session, instance):
"""Intercept the "pending to persistent"" transition for a specific object.
This event is invoked within the flush process, and is
similar to scanning the :attr:`.Session.new` collection within
the :meth:`.SessionEvents.after_flush` event. However, in this
case the object has already been moved to the persistent state
when the event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def detached_to_persistent(self, session, instance):
"""Intercept the "detached to persistent" transition for a specific object.
This event is a specialization of the
:meth:`.SessionEvents.after_attach` event which is only invoked
for this specific transition. It is invoked typically during the
:meth:`.Session.add` call, as well as during the
:meth:`.Session.delete` call if the object was not previously
associated with the
:class:`.Session` (note that an object marked as "deleted" remains
in the "persistent" state until the flush proceeds).
.. note::
If the object becomes persistent as part of a call to
:meth:`.Session.delete`, the object is **not** yet marked as
deleted when this event is called. To detect deleted objects,
check the ``deleted`` flag sent to the
:meth:`.SessionEvents.persistent_to_detached` event after the
flush proceeds, or check the :attr:`.Session.deleted` collection
within the :meth:`.SessionEvents.before_flush` event if deleted
objects need to be intercepted before the flush.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def loaded_as_persistent(self, session, instance):
"""Intercept the "loaded as persistent" transition for a specific object.
This event is invoked within the ORM loading process, and is invoked
very similarly to the :meth:`.InstanceEvents.load` event. However,
the event here is linkable to a :class:`.Session` class or instance,
rather than to a mapper or class hierarchy, and integrates
with the other session lifecycle events smoothly. The object
is guaranteed to be present in the session's identity map when
this event is called.
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_deleted(self, session, instance):
"""Intercept the "persistent to deleted" transition for a specific object.
This event is invoked when a persistent object's identity
is deleted from the database within a flush, however the object
still remains associated with the :class:`.Session` until the
transaction completes.
If the transaction is rolled back, the object moves again
to the persistent state, and the
:meth:`.SessionEvents.deleted_to_persistent` event is called.
If the transaction is committed, the object becomes detached,
which will emit the :meth:`.SessionEvents.deleted_to_detached`
event.
Note that while the :meth:`.Session.delete` method is the primary
public interface to mark an object as deleted, many objects
get deleted due to cascade rules, which are not always determined
until flush time. Therefore, there's no way to catch
every object that will be deleted until the flush has proceeded.
The :meth:`.SessionEvents.persistent_to_deleted` event is therefore
invoked at the end of a flush.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_persistent(self, session, instance):
"""Intercept the "deleted to persistent" transition for a specific object.
This transition occurs only when an object that's been deleted
successfully in a flush is restored due to a call to
:meth:`.Session.rollback`. The event is not called under
any other circumstances.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def deleted_to_detached(self, session, instance):
"""Intercept the "deleted to detached" transition for a specific object.
This event is invoked when a deleted object is evicted
from the session. The typical case when this occurs is when
the transaction for a :class:`.Session` in which the object
was deleted is committed; the object moves from the deleted
state to the detached state.
It is also invoked for objects that were deleted in a flush
when the :meth:`.Session.expunge_all` or :meth:`.Session.close`
methods are called, as well as if the object is individually
expunged from its deleted state via :meth:`.Session.expunge`.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
def persistent_to_detached(self, session, instance):
"""Intercept the "persistent to detached" transition for a specific object.
This event is invoked when a persistent object is evicted
from the session. There are many conditions that cause this
to happen, including:
* using a method such as :meth:`.Session.expunge`
or :meth:`.Session.close`
* Calling the :meth:`.Session.rollback` method, when the object
was part of an INSERT statement for that session's transaction
:param session: target :class:`.Session`
:param instance: the ORM-mapped instance being operated upon.
:param deleted: boolean. If True, indicates this object moved
to the detached state because it was marked as deleted and flushed.
.. versionadded:: 1.1
.. seealso::
:ref:`session_lifecycle_events`
"""
class AttributeEvents(event.Events):
"""Define events for object attributes.
These are typically defined on the class-bound descriptor for the
target class.
e.g.::
from sqlalchemy import event
def my_append_listener(target, value, initiator):
print "received append event for target: %s" % target
event.listen(MyClass.collection, 'append', my_append_listener)
Listeners have the option to return a possibly modified version
of the value, when the ``retval=True`` flag is passed
to :func:`~.event.listen`::
def validate_phone(target, value, oldvalue, initiator):
"Strip non-numeric characters from a phone number"
return re.sub(r'[^0-9]', '', value)
# setup listener on UserContact.phone attribute, instructing
# it to use the return value
listen(UserContact.phone, 'set', validate_phone, retval=True)
A validation function like the above can also raise an exception
such as :exc:`ValueError` to halt the operation.
Several modifiers are available to the :func:`~.event.listen` function.
:param active_history=False: When True, indicates that the
"set" event would like to receive the "old" value being
replaced unconditionally, even if this requires firing off
database loads. Note that ``active_history`` can also be
set directly via :func:`.column_property` and
:func:`.relationship`.
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
"""
_target_class_doc = "SomeClass.some_attribute"
_dispatch_target = QueryableAttribute
@staticmethod
def _set_dispatch(cls, dispatch_cls):
dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
return dispatch
@classmethod
def _accept_with(cls, target):
# TODO: coverage
if isinstance(target, interfaces.MapperProperty):
return getattr(target.parent.class_, target.key)
else:
return target
@classmethod
def _listen(cls, event_key, active_history=False,
raw=False, retval=False,
propagate=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
if active_history:
target.dispatch._active_history = True
if not raw or not retval:
def wrap(target, value, *arg):
if not raw:
target = target.obj()
if not retval:
fn(target, value, *arg)
return value
else:
return fn(target, value, *arg)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(propagate=propagate)
if propagate:
manager = instrumentation.manager_of_class(target.class_)
for mgr in manager.subclass_managers(True):
event_key.with_dispatch_target(
mgr[target.key]).base_listen(propagate=True)
def append(self, target, value, initiator):
"""Receive a collection append event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
"""
def remove(self, target, value, initiator):
"""Receive a collection remove event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: No return value is defined for this event.
"""
def set(self, target, value, oldvalue, initiator):
"""Receive a scalar set event.
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
or expired.
:param initiator: An instance of :class:`.attributes.Event`
representing the initiation of the event. May be modified
from its original value by backref handlers in order to control
chained event propagation.
.. versionchanged:: 0.9.0 the ``initiator`` argument is now
passed as a :class:`.attributes.Event` object, and may be
modified by backref handlers within a chain of backref-linked
events.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
"""
def init_scalar(self, target, value, dict_):
"""Receive a scalar "init" event.
This event is invoked when an uninitialized, unpersisted scalar
attribute is accessed. A value of ``None`` is typically returned
in this case; no changes are made to the object's state.
The event handler can alter this behavior in two ways.
One is that a value other than ``None`` may be returned. The other
is that the value may be established as part of the object's state,
which will also have the effect that it is persisted.
Typical use is to establish a specific default value of an attribute
upon access::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
def _init_some_attribute(target, value, dict_):
dict_['some_attribute'] = SOME_CONSTANT
return SOME_CONSTANT
Above, we initialize the attribute ``MyClass.some_attribute`` to the
value of ``SOME_CONSTANT``. The above code includes the following
features:
* By setting the value ``SOME_CONSTANT`` in the given ``dict_``,
we indicate that the value is to be persisted to the database.
**The given value is only persisted to the database if we
explicitly associate it with the object**. The ``dict_`` given
is the ``__dict__`` element of the mapped object, assuming the
default attribute instrumentation system is in place.
* By establishing the ``retval=True`` flag, the value we return
from the function will be returned by the attribute getter.
Without this flag, the event is assumed to be a passive observer
and the return value of our function is ignored.
* The ``propagate=True`` flag is significant if the mapped class
includes inheriting subclasses, which would also make use of this
event listener. Without this flag, an inheriting subclass will
not use our event handler.
When we establish the value in the given dictionary, the value will
be used in the INSERT statement established by the unit of work.
Normally, the default returned value of ``None`` is not established as
part of the object, to avoid the issue of mutations occurring to the
object in response to a normally passive "get" operation, and also
sidesteps the issue of whether or not the :meth:`.AttributeEvents.set`
event should be awkwardly fired off during an attribute access
operation. This does not impact the INSERT operation since the
``None`` value matches the value of ``NULL`` that goes into the
database in any case; note that ``None`` is skipped during the INSERT
to ensure that column and SQL-level default functions can fire off.
The attribute set event :meth:`.AttributeEvents.set` as well as the
related validation feature provided by :obj:`.orm.validates` is
        **not** invoked when we apply our value to the given ``dict_``. To
        have these events invoked in response to our newly generated
        value, apply the value to the given object as a normal attribute
set operation::
SOME_CONSTANT = 3.1415926
@event.listens_for(
MyClass.some_attribute, "init_scalar",
retval=True, propagate=True)
def _init_some_attribute(target, dict_, value):
# will also fire off attribute set events
target.some_attribute = SOME_CONSTANT
return SOME_CONSTANT
When multiple listeners are set up, the generation of the value
is "chained" from one listener to the next by passing the value
returned by the previous listener that specifies ``retval=True``
as the ``value`` argument of the next listener.
The :meth:`.AttributeEvents.init_scalar` event may be used to
extract values from the default values and/or callables established on
mapped :class:`.Column` objects. See the "active column defaults"
example in :ref:`examples_instrumentation` for an example of this.
.. versionadded:: 1.1
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
        :param value: the value that is to be returned before this event
         listener is invoked. This value begins as the value ``None``,
however will be the return value of the previous event handler
function if multiple listeners are present.
:param dict_: the attribute dictionary of this mapped object.
This is normally the ``__dict__`` of the object, but in all cases
represents the destination that the attribute system uses to get
at the actual value of this attribute. Placing the value in this
dictionary has the effect that the value will be used in the
INSERT statement generated by the unit of work.
.. seealso::
:ref:`examples_instrumentation` - see the
``active_column_defaults.py`` example.
"""
def init_collection(self, target, collection, collection_adapter):
"""Receive a 'collection init' event.
This event is triggered for a collection-based attribute, when
the initial "empty collection" is first generated for a blank
attribute, as well as for when the collection is replaced with
a new one, such as via a set event.
E.g., given that ``User.addresses`` is a relationship-based
collection, the event is triggered here::
u1 = User()
u1.addresses.append(a1) # <- new collection
and also during replace operations::
u1.addresses = [a2, a3] # <- new collection
:param target: the object instance receiving the event.
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param collection: the new collection. This will always be generated
from what was specified as
:paramref:`.RelationshipProperty.collection_class`, and will always
be empty.
        :param collection_adapter: the :class:`.CollectionAdapter` that will
mediate internal access to the collection.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
    def dispose_collection(self, target, collection, collection_adapter):
"""Receive a 'collection dispose' event.
This event is triggered for a collection-based attribute when
a collection is replaced, that is::
u1.addresses.append(a1)
u1.addresses = [a2, a3] # <- old collection is disposed
The mechanics of the event will typically include that the given
collection is empty, even if it stored objects while being replaced.
.. versionadded:: 1.0.0 the :meth:`.AttributeEvents.init_collection`
and :meth:`.AttributeEvents.dispose_collection` events supersede
the :class:`.collection.linker` hook.
"""
class QueryEvents(event.Events):
"""Represent events within the construction of a :class:`.Query` object.
The events here are intended to be used with an as-yet-unreleased
inspection system for :class:`.Query`. Some very basic operations
    are possible now; however, the inspection system is intended to allow
complex query manipulations to be automated.
.. versionadded:: 1.0.0
"""
_target_class_doc = "SomeQuery"
_dispatch_target = Query
def before_compile(self, query):
"""Receive the :class:`.Query` object before it is composed into a
core :class:`.Select` object.
This event is intended to allow changes to the query given::
@event.listens_for(Query, "before_compile", retval=True)
def no_deleted(query):
for desc in query.column_descriptions:
if desc['type'] is User:
entity = desc['entity']
query = query.filter(entity.deleted == False)
return query
The event should normally be listened with the ``retval=True``
parameter set, so that the modified query may be returned.
"""
@classmethod
def _listen(
cls, event_key, retval=False, **kw):
fn = event_key._listen_fn
if not retval:
def wrap(*arg, **kw):
if not retval:
query = arg[0]
fn(*arg, **kw)
return query
else:
return fn(*arg, **kw)
event_key = event_key.with_wrapper(wrap)
event_key.base_listen(**kw)
| apache-2.0 |
clstl/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_argcomplete.py | 179 | 3582 | from __future__ import with_statement
import py, pytest
# test for _argcomplete but not specific to any application
def equal_with_bash(prefix, ffc, fc, out=None):
res = ffc(prefix)
res_bash = set(fc(prefix))
retval = set(res) == res_bash
if out:
out.write('equal_with_bash %s %s\n' % (retval, res))
if not retval:
out.write(' python - bash: %s\n' % (set(res) - res_bash))
out.write(' bash - python: %s\n' % (res_bash - set(res)))
return retval
# copied from argcomplete.completers, because importing from there
# also pulls in argcomplete.__init__, which opens file descriptor 9;
# this gives an IOError at the end of the test run
def _wrapcall(*args, **kargs):
try:
if py.std.sys.version_info > (2,7):
return py.std.subprocess.check_output(*args,**kargs).decode().splitlines()
if 'stdout' in kargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = py.std.subprocess.Popen(
stdout=py.std.subprocess.PIPE, *args, **kargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kargs.get("args")
if cmd is None:
cmd = args[0]
raise py.std.subprocess.CalledProcessError(retcode, cmd)
return output.decode().splitlines()
except py.std.subprocess.CalledProcessError:
return []
class FilesCompleter(object):
'File completer class, optionally takes a list of allowed extensions'
def __init__(self,allowednames=(),directories=True):
# Fix if someone passes in a string instead of a list
if type(allowednames) is str:
allowednames = [allowednames]
self.allowednames = [x.lstrip('*').lstrip('.') for x in allowednames]
self.directories = directories
def __call__(self, prefix, **kwargs):
completion = []
if self.allowednames:
if self.directories:
files = _wrapcall(['bash','-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion += [ f + '/' for f in files]
for x in self.allowednames:
completion += _wrapcall(['bash', '-c',
"compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)])
else:
completion += _wrapcall(['bash', '-c',
"compgen -A file -- '{p}'".format(p=prefix)])
anticomp = _wrapcall(['bash', '-c',
"compgen -A directory -- '{p}'".format(p=prefix)])
completion = list( set(completion) - set(anticomp))
if self.directories:
completion += [f + '/' for f in anticomp]
return completion
class TestArgComplete:
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_compare_with_compgen(self):
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/ /d /data qqq'.split():
assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
@pytest.mark.skipif("sys.platform in ('win32', 'darwin')")
def test_remove_dir_prefix(self):
"""this is not compatible with compgen but it is with bash itself:
ls /usr/<TAB>
"""
from _pytest._argcomplete import FastFilesCompleter
ffc = FastFilesCompleter()
fc = FilesCompleter()
for x in '/usr/'.split():
assert not equal_with_bash(x, ffc, fc, out=py.std.sys.stdout)
| mpl-2.0 |
alanbowman/home-assistant | homeassistant/components/notify/xmpp.py | 9 | 3648 | """
homeassistant.components.notify.xmpp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Jabber (XMPP) notification service.
Configuration:
To use the Jabber notifier you will need to add something like the following
to your configuration.yaml file.
notify:
platform: xmpp
sender: YOUR_JID
password: YOUR_JABBER_ACCOUNT_PASSWORD
recipient: YOUR_RECIPIENT
Variables:
sender
*Required
The Jabber ID (JID) that will act as origin of the messages. Add your JID
including the domain, e.g. [email protected].
password
*Required
The password for your given Jabber account.
recipient
*Required
The Jabber ID (JID) that will receive the messages.
"""
import logging
_LOGGER = logging.getLogger(__name__)
try:
import sleekxmpp
except ImportError:
_LOGGER.exception(
"Unable to import sleekxmpp. "
"Did you maybe not install the 'SleekXMPP' package?")
from homeassistant.helpers import validate_config
from homeassistant.components.notify import (
DOMAIN, ATTR_TITLE, BaseNotificationService)
REQUIREMENTS = ['sleekxmpp==1.3.1', 'dnspython3==1.12.0']
def get_service(hass, config):
""" Get the Jabber (XMPP) notification service. """
if not validate_config(config,
{DOMAIN: ['sender',
'password',
'recipient']},
_LOGGER):
return None
try:
SendNotificationBot(config[DOMAIN]['sender'] + '/home-assistant',
config[DOMAIN]['password'],
config[DOMAIN]['recipient'],
'')
except ImportError:
_LOGGER.exception(
"Unable to contact jabber server."
"Please check your credentials.")
return None
return XmppNotificationService(config[DOMAIN]['sender'],
config[DOMAIN]['password'],
config[DOMAIN]['recipient'])
# pylint: disable=too-few-public-methods
class XmppNotificationService(BaseNotificationService):
""" Implements notification service for Jabber (XMPP). """
def __init__(self, sender, password, recipient):
self._sender = sender
self._password = password
self._recipient = recipient
def send_message(self, message="", **kwargs):
""" Send a message to a user. """
title = kwargs.get(ATTR_TITLE)
data = title + ": " + message
SendNotificationBot(self._sender + '/home-assistant',
self._password,
self._recipient,
data)
class SendNotificationBot(sleekxmpp.ClientXMPP):
""" Service for sending Jabber (XMPP) messages. """
def __init__(self, jid, password, recipient, msg):
super(SendNotificationBot, self).__init__(jid, password)
logging.basicConfig(level=logging.ERROR)
self.recipient = recipient
self.msg = msg
self.use_tls = True
self.use_ipv6 = False
self.add_event_handler('failed_auth', self.check_credentials)
self.add_event_handler('session_start', self.start)
self.connect()
self.process(block=False)
def start(self, event):
""" Starts the communication and sends the message. """
self.send_presence()
self.get_roster()
self.send_message(mto=self.recipient, mbody=self.msg, mtype='chat')
self.disconnect(wait=True)
def check_credentials(self, event):
"""" Disconnect from the server if credentials are invalid. """
self.disconnect()
| mit |
wolverine2k/Secure-Deluge | deluge/plugins/blocklist/setup.py | 5 | 2311 | # setup.py
#
# Copyright (C) 2008 Andrew Resch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from setuptools import setup
__plugin_name__ = "Blocklist"
__author__ = "John Garland"
__author_email__ = "[email protected]"
__version__ = "1.2"
__url__ = "http://deluge-torrent.org"
__license__ = "GPLv3"
__description__ = "Download and import IP blocklists"
__long_description__ = __description__
__pkg_data__ = {__plugin_name__.lower(): ["data/*"]}
setup(
name=__plugin_name__,
version=__version__,
description=__description__,
author=__author__,
author_email=__author_email__,
url=__url__,
license=__license__,
long_description=__long_description__,
packages=[__plugin_name__.lower()],
package_data = __pkg_data__,
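    # The six %s placeholders below are filled by repeating the
    # (plugin name, lower-cased package name) pair once per entry-point group.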
entry_points="""
[deluge.plugin.core]
%s = %s:CorePlugin
[deluge.plugin.gtkui]
%s = %s:GtkUIPlugin
[deluge.plugin.web]
%s = %s:WebUIPlugin
""" % ((__plugin_name__, __plugin_name__.lower())*3)
)
| gpl-3.0 |
chaluemwut/smcdemo | demo_filter.py | 1 | 2602 | import pickle
from feature_process import FeatureMapping
import feature_process
from text_processing import TextProcessing
from sklearn.cross_validation import train_test_split
is_not_important = {9:0,
13:0,
18:0,
19:0,
23:0,
28:0,
29:0,
33:0,
34:0,
37:0,
40:0,
44:0,
46:0,
50:0,
55:0,
59:0,
61:0,
62:0,
63:0,
72:0,
73:0,
78:0,
84:0,
86:0,
88:0,
97:0,
98:0,
103:0
}
def create_training_data():
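    """Build a labelled training set from the pickled harvest data, map raw
    tweet fields to numeric features, then train and evaluate a
    RandomForest classifier on an 80/20 train/test split."""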
data_lst = pickle.load(open('data/harvest.data', 'rb'))
feature_process.feature_map['source'] = {'Google':1, 'Twitter for iPad':2, 'Echofon':3,
'Bitly':4, 'twitterfeed':5, 'Twitter for iPhone':6,
'Foursquare':7, 'Facebook':8, 'Twitter for Android':9,
'TweetDeck':10, 'Twitter Web Client':11}
feature_process.feature_map['geo'] = ['None']
feature_process.feature_map['place'] = ['None']
feature_process.feature_map['verified'] = ['False']
feature_process.feature_map['geo_enabled'] = ['False']
y = []
x = []
for i in range(0, len(data_lst)):
try:
label = is_not_important[i]
except Exception as e:
label = 1
data = data_lst[i]
text = TextProcessing.process(data[0])
source = FeatureMapping.mapping('source', data[1])
re_tweet = data[2]
geo = FeatureMapping.mapping_other('geo', data[3])
place = FeatureMapping.mapping_other('place', data[4])
hash_tag = data[5]
media = data[6]
verified = FeatureMapping.mapping_other('verified', data[7])
follower = data[8]
statues = data[9]
desc = TextProcessing.process(data[10])
friend = data[11]
location = TextProcessing.process(data[12])
geo_enabled = FeatureMapping.mapping_other('geo_enabled', data[13])
y.append(label)
x.append([text, source, re_tweet, geo, place, hash_tag, media, verified, follower, statues, desc, friend, location, geo_enabled])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, accuracy_score
clf = RandomForestClassifier()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
fsc = f1_score(y_test, y_pred)
acc = accuracy_score(y_test, y_pred)
print 'f1-score : ',fsc
print 'accuracy : ',acc
print y_pred
print y_test
if __name__ == '__main__':
create_training_data()
| apache-2.0 |
ychen820/microblog | y/google-cloud-sdk/.install/.backup/lib/requests/exceptions.py | 341 | 1877 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
def __init__(self, *args, **kwargs):
"""
Initialize RequestException with `request` and `response` objects.
"""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
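# Illustrative consumer-side sketch (not part of this module): exceptions
# raised by requests carry the originating request/response where available.
#
#     import requests
#     try:
#         resp = requests.get("https://example.invalid/")
#         resp.raise_for_status()
#     except requests.exceptions.HTTPError as err:
#         # the failed Response travels with the exception
#         print(err.response.status_code)
#     except requests.exceptions.ConnectionError as err:
#         print("could not connect: %s" % err)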
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
| bsd-3-clause |
ChanChiChoi/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:.9 * n_sample]
y_train = y[:.9 * n_sample]
X_test = X[.9 * n_sample:]
y_test = y[.9 * n_sample:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
SPACEDAC7/TrabajoFinalGrado | StaticAnalyzer/views/android/java.py | 5 | 2279 | # -*- coding: utf_8 -*-
"""List all java files."""
import re
import shutil
import os
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.conf import settings
from django.utils.html import escape
from MobSF.utils import (
PrintException
)
def run(request):
"""Show the java code."""
try:
match = re.match('^[0-9a-f]{32}$', request.GET['md5'])
typ = request.GET['type']
if match:
md5 = request.GET['md5']
if typ == 'eclipse':
src = os.path.join(settings.UPLD_DIR, md5 + '/src/')
elif typ == 'studio':
src = os.path.join(settings.UPLD_DIR, md5 + '/app/src/main/java/')
elif typ == 'apk':
src = os.path.join(settings.UPLD_DIR, md5 + '/java_source/')
else:
return HttpResponseRedirect('/error/')
html = ''
# pylint: disable=unused-variable
# Needed by os.walk
for dir_name, sub_dir, files in os.walk(src):
for jfile in files:
if jfile.endswith(".java"):
file_path = os.path.join(src, dir_name, jfile)
if "+" in jfile:
fp2 = os.path.join(src, dir_name, jfile.replace("+", "x"))
shutil.move(file_path, fp2)
file_path = fp2
fileparam = file_path.replace(src, '')
if any(cls in fileparam for cls in settings.SKIP_CLASSES) is False:
html += (
"<tr><td><a href='../ViewSource/?file=" + escape(fileparam) +
"&md5=" + md5 +
"&type=" + typ + "'>" +
escape(fileparam) + "</a></td></tr>"
)
context = {
'title': 'Java Source',
'files': html,
'md5': md5,
'type': typ,
}
template = "static_analysis/java.html"
return render(request, template, context)
except:
PrintException("[ERROR] Getting Java Files")
return HttpResponseRedirect('/error/')
| gpl-3.0 |
vlachoudis/sl4a | python/src/Lib/test/test_os.py | 48 | 23971 | # As a test suite for the os module, this is woefully inadequate, but this
# does add tests for a few functions which have been determined to be more
# portable than they had been thought to be.
import os
import unittest
import warnings
import sys
from test import test_support
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning, __name__)
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning, __name__)
# Tests creating TESTFN
class FileTests(unittest.TestCase):
def setUp(self):
if os.path.exists(test_support.TESTFN):
os.unlink(test_support.TESTFN)
tearDown = setUp
def test_access(self):
f = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
os.close(f)
self.assert_(os.access(test_support.TESTFN, os.W_OK))
def test_closerange(self):
first = os.open(test_support.TESTFN, os.O_CREAT|os.O_RDWR)
# We must allocate two consecutive file descriptors, otherwise
# it will mess up other file descriptors (perhaps even the three
# standard ones).
second = os.dup(first)
try:
retries = 0
while second != first + 1:
os.close(first)
retries += 1
if retries > 10:
# XXX test skipped
print >> sys.stderr, (
"couldn't allocate two consecutive fds, "
"skipping test_closerange")
return
first, second = second, os.dup(second)
finally:
os.close(second)
# close a fd that is open, and one that isn't
os.closerange(first, first + 2)
self.assertRaises(OSError, os.write, first, "a")
def test_rename(self):
path = unicode(test_support.TESTFN)
old = sys.getrefcount(path)
self.assertRaises(TypeError, os.rename, path, 0)
new = sys.getrefcount(path)
self.assertEqual(old, new)
class TemporaryFileTests(unittest.TestCase):
def setUp(self):
self.files = []
os.mkdir(test_support.TESTFN)
def tearDown(self):
for name in self.files:
os.unlink(name)
os.rmdir(test_support.TESTFN)
def check_tempfile(self, name):
# make sure it doesn't already exist:
self.failIf(os.path.exists(name),
"file already exists for temporary file")
# make sure we can create the file
open(name, "w")
self.files.append(name)
def test_tempnam(self):
if not hasattr(os, "tempnam"):
return
warnings.filterwarnings("ignore", "tempnam", RuntimeWarning,
r"test_os$")
self.check_tempfile(os.tempnam())
name = os.tempnam(test_support.TESTFN)
self.check_tempfile(name)
name = os.tempnam(test_support.TESTFN, "pfx")
self.assert_(os.path.basename(name)[:3] == "pfx")
self.check_tempfile(name)
def test_tmpfile(self):
if not hasattr(os, "tmpfile"):
return
# As with test_tmpnam() below, the Windows implementation of tmpfile()
# attempts to create a file in the root directory of the current drive.
# On Vista and Server 2008, this test will always fail for normal users
# as writing to the root directory requires elevated privileges. With
# XP and below, the semantics of tmpfile() are the same, but the user
# running the test is more likely to have administrative privileges on
# their account already. If that's the case, then os.tmpfile() should
# work. In order to make this test as useful as possible, rather than
# trying to detect Windows versions or whether or not the user has the
# right permissions, just try and create a file in the root directory
# and see if it raises a 'Permission denied' OSError. If it does, then
# test that a subsequent call to os.tmpfile() raises the same error. If
# it doesn't, assume we're on XP or below and the user running the test
# has administrative privileges, and proceed with the test as normal.
if sys.platform == 'win32':
name = '\\python_test_os_test_tmpfile.txt'
if os.path.exists(name):
os.remove(name)
try:
fp = open(name, 'w')
except IOError, first:
# open() failed, assert tmpfile() fails in the same way.
# Although open() raises an IOError and os.tmpfile() raises an
# OSError(), 'args' will be (13, 'Permission denied') in both
# cases.
try:
fp = os.tmpfile()
except OSError, second:
self.assertEqual(first.args, second.args)
else:
self.fail("expected os.tmpfile() to raise OSError")
return
else:
# open() worked, therefore, tmpfile() should work. Close our
# dummy file and proceed with the test as normal.
fp.close()
os.remove(name)
fp = os.tmpfile()
fp.write("foobar")
fp.seek(0,0)
s = fp.read()
fp.close()
self.assert_(s == "foobar")
def test_tmpnam(self):
import sys
if not hasattr(os, "tmpnam"):
return
warnings.filterwarnings("ignore", "tmpnam", RuntimeWarning,
r"test_os$")
name = os.tmpnam()
if sys.platform in ("win32",):
# The Windows tmpnam() seems useless. From the MS docs:
#
# The character string that tmpnam creates consists of
# the path prefix, defined by the entry P_tmpdir in the
# file STDIO.H, followed by a sequence consisting of the
# digit characters '0' through '9'; the numerical value
# of this string is in the range 1 - 65,535. Changing the
# definitions of L_tmpnam or P_tmpdir in STDIO.H does not
# change the operation of tmpnam.
#
# The really bizarre part is that, at least under MSVC6,
# P_tmpdir is "\\". That is, the path returned refers to
# the root of the current drive. That's a terrible place to
# put temp files, and, depending on privileges, the user
# may not even be able to open a file in the root directory.
self.failIf(os.path.exists(name),
"file already exists for temporary file")
else:
self.check_tempfile(name)
# Test attributes on return values from os.*stat* family.
class StatAttributeTests(unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
self.fname = os.path.join(test_support.TESTFN, "f1")
f = open(self.fname, 'wb')
f.write("ABC")
f.close()
def tearDown(self):
os.unlink(self.fname)
os.rmdir(test_support.TESTFN)
def test_stat_attributes(self):
if not hasattr(os, "stat"):
return
import stat
result = os.stat(self.fname)
# Make sure direct access works
self.assertEquals(result[stat.ST_SIZE], 3)
self.assertEquals(result.st_size, 3)
import sys
# Make sure all the attributes are there
members = dir(result)
for name in dir(stat):
if name[:3] == 'ST_':
attr = name.lower()
if name.endswith("TIME"):
def trunc(x): return int(x)
else:
def trunc(x): return x
self.assertEquals(trunc(getattr(result, attr)),
result[getattr(stat, name)])
self.assert_(attr in members)
try:
result[200]
self.fail("No exception thrown")
except IndexError:
pass
# Make sure that assignment fails
try:
result.st_mode = 1
self.fail("No exception thrown")
except TypeError:
pass
try:
result.st_rdev = 1
self.fail("No exception thrown")
except (AttributeError, TypeError):
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the stat_result constructor with a too-short tuple.
try:
result2 = os.stat_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
        # Use the constructor with a too-long tuple.
try:
result2 = os.stat_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_statvfs_attributes(self):
if not hasattr(os, "statvfs"):
return
try:
result = os.statvfs(self.fname)
except OSError, e:
# On AtheOS, glibc always returns ENOSYS
import errno
if e.errno == errno.ENOSYS:
return
# Make sure direct access works
self.assertEquals(result.f_bfree, result[3])
# Make sure all the attributes are there.
members = ('bsize', 'frsize', 'blocks', 'bfree', 'bavail', 'files',
'ffree', 'favail', 'flag', 'namemax')
for value, member in enumerate(members):
self.assertEquals(getattr(result, 'f_' + member), result[value])
# Make sure that assignment really fails
try:
result.f_bfree = 1
self.fail("No exception thrown")
except TypeError:
pass
try:
result.parrot = 1
self.fail("No exception thrown")
except AttributeError:
pass
# Use the constructor with a too-short tuple.
try:
result2 = os.statvfs_result((10,))
self.fail("No exception thrown")
except TypeError:
pass
        # Use the constructor with a too-long tuple.
try:
result2 = os.statvfs_result((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14))
except TypeError:
pass
def test_utime_dir(self):
delta = 1000000
st = os.stat(test_support.TESTFN)
# round to int, because some systems may support sub-second
# time stamps in stat, but not in utime.
os.utime(test_support.TESTFN, (st.st_atime, int(st.st_mtime-delta)))
st2 = os.stat(test_support.TESTFN)
self.assertEquals(st2.st_mtime, int(st.st_mtime-delta))
# Restrict test to Win32, since there is no guarantee other
# systems support centiseconds
if sys.platform == 'win32':
def get_file_system(path):
root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
import ctypes
kernel32 = ctypes.windll.kernel32
buf = ctypes.create_string_buffer("", 100)
if kernel32.GetVolumeInformationA(root, None, 0, None, None, None, buf, len(buf)):
return buf.value
if get_file_system(test_support.TESTFN) == "NTFS":
def test_1565150(self):
t1 = 1159195039.25
os.utime(self.fname, (t1, t1))
self.assertEquals(os.stat(self.fname).st_mtime, t1)
def test_1686475(self):
# Verify that an open file can be stat'ed
try:
os.stat(r"c:\pagefile.sys")
except WindowsError, e:
if e.errno == 2: # file does not exist; cannot run test
return
self.fail("Could not stat pagefile.sys")
from test import mapping_tests
class EnvironTests(mapping_tests.BasicTestMappingProtocol):
"""check that os.environ object conform to mapping protocol"""
type2test = None
def _reference(self):
return {"KEY1":"VALUE1", "KEY2":"VALUE2", "KEY3":"VALUE3"}
def _empty_mapping(self):
os.environ.clear()
return os.environ
def setUp(self):
self.__save = dict(os.environ)
os.environ.clear()
def tearDown(self):
os.environ.clear()
os.environ.update(self.__save)
# Bug 1110478
def test_update2(self):
if os.path.exists("/bin/sh"):
os.environ.update(HELLO="World")
value = os.popen("/bin/sh -c 'echo $HELLO'").read().strip()
self.assertEquals(value, "World")
class WalkTests(unittest.TestCase):
"""Tests for os.walk()."""
def test_traversal(self):
import os
from os.path import join
# Build:
# TESTFN/
# TEST1/ a file kid and two directory kids
# tmp1
# SUB1/ a file kid and a directory kid
# tmp2
# SUB11/ no kids
# SUB2/ a file kid and a dirsymlink kid
# tmp3
# link/ a symlink to TESTFN.2
# TEST2/
# tmp4 a lone file
walk_path = join(test_support.TESTFN, "TEST1")
sub1_path = join(walk_path, "SUB1")
sub11_path = join(sub1_path, "SUB11")
sub2_path = join(walk_path, "SUB2")
tmp1_path = join(walk_path, "tmp1")
tmp2_path = join(sub1_path, "tmp2")
tmp3_path = join(sub2_path, "tmp3")
link_path = join(sub2_path, "link")
t2_path = join(test_support.TESTFN, "TEST2")
tmp4_path = join(test_support.TESTFN, "TEST2", "tmp4")
# Create stuff.
os.makedirs(sub11_path)
os.makedirs(sub2_path)
os.makedirs(t2_path)
for path in tmp1_path, tmp2_path, tmp3_path, tmp4_path:
f = file(path, "w")
f.write("I'm " + path + " and proud of it. Blame test_os.\n")
f.close()
if hasattr(os, "symlink"):
os.symlink(os.path.abspath(t2_path), link_path)
sub2_tree = (sub2_path, ["link"], ["tmp3"])
else:
sub2_tree = (sub2_path, [], ["tmp3"])
# Walk top-down.
all = list(os.walk(walk_path))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: TESTFN, SUB1, SUB11, SUB2
# flipped: TESTFN, SUB2, SUB1, SUB11
flipped = all[0][1][0] != "SUB1"
all[0][1].sort()
self.assertEqual(all[0], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[1 + flipped], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 + flipped], (sub11_path, [], []))
self.assertEqual(all[3 - 2 * flipped], sub2_tree)
# Prune the search.
all = []
for root, dirs, files in os.walk(walk_path):
all.append((root, dirs, files))
# Don't descend into SUB1.
if 'SUB1' in dirs:
# Note that this also mutates the dirs we appended to all!
dirs.remove('SUB1')
self.assertEqual(len(all), 2)
self.assertEqual(all[0], (walk_path, ["SUB2"], ["tmp1"]))
self.assertEqual(all[1], sub2_tree)
# Walk bottom-up.
all = list(os.walk(walk_path, topdown=False))
self.assertEqual(len(all), 4)
# We can't know which order SUB1 and SUB2 will appear in.
# Not flipped: SUB11, SUB1, SUB2, TESTFN
# flipped: SUB2, SUB11, SUB1, TESTFN
flipped = all[3][1][0] != "SUB1"
all[3][1].sort()
self.assertEqual(all[3], (walk_path, ["SUB1", "SUB2"], ["tmp1"]))
self.assertEqual(all[flipped], (sub11_path, [], []))
self.assertEqual(all[flipped + 1], (sub1_path, ["SUB11"], ["tmp2"]))
self.assertEqual(all[2 - 2 * flipped], sub2_tree)
if hasattr(os, "symlink"):
# Walk, following symlinks.
for root, dirs, files in os.walk(walk_path, followlinks=True):
if root == link_path:
self.assertEqual(dirs, [])
self.assertEqual(files, ["tmp4"])
break
else:
self.fail("Didn't follow symlink with followlinks=True")
def tearDown(self):
# Tear everything down. This is a decent use for bottom-up on
# Windows, which doesn't have a recursive delete command. The
# (not so) subtlety is that rmdir will fail unless the dir's
# kids are removed first, so bottom up is essential.
for root, dirs, files in os.walk(test_support.TESTFN, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
dirname = os.path.join(root, name)
if not os.path.islink(dirname):
os.rmdir(dirname)
else:
os.remove(dirname)
os.rmdir(test_support.TESTFN)
class MakedirTests (unittest.TestCase):
def setUp(self):
os.mkdir(test_support.TESTFN)
def test_makedir(self):
base = test_support.TESTFN
path = os.path.join(base, 'dir1', 'dir2', 'dir3')
os.makedirs(path) # Should work
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4')
os.makedirs(path)
# Try paths with a '.' in them
self.failUnlessRaises(OSError, os.makedirs, os.curdir)
path = os.path.join(base, 'dir1', 'dir2', 'dir3', 'dir4', 'dir5', os.curdir)
os.makedirs(path)
path = os.path.join(base, 'dir1', os.curdir, 'dir2', 'dir3', 'dir4',
'dir5', 'dir6')
os.makedirs(path)
def tearDown(self):
path = os.path.join(test_support.TESTFN, 'dir1', 'dir2', 'dir3',
'dir4', 'dir5', 'dir6')
# If the tests failed, the bottom-most directory ('../dir6')
# may not have been created, so we look for the outermost directory
# that exists.
while not os.path.exists(path) and path != test_support.TESTFN:
path = os.path.dirname(path)
os.removedirs(path)
class DevNullTests (unittest.TestCase):
def test_devnull(self):
f = file(os.devnull, 'w')
f.write('hello')
f.close()
f = file(os.devnull, 'r')
self.assertEqual(f.read(), '')
f.close()
class URandomTests (unittest.TestCase):
def test_urandom(self):
try:
self.assertEqual(len(os.urandom(1)), 1)
self.assertEqual(len(os.urandom(10)), 10)
self.assertEqual(len(os.urandom(100)), 100)
self.assertEqual(len(os.urandom(1000)), 1000)
# see http://bugs.python.org/issue3708
self.assertEqual(len(os.urandom(0.9)), 0)
self.assertEqual(len(os.urandom(1.1)), 1)
self.assertEqual(len(os.urandom(2.0)), 2)
except NotImplementedError:
pass
class Win32ErrorTests(unittest.TestCase):
def test_rename(self):
self.assertRaises(WindowsError, os.rename, test_support.TESTFN, test_support.TESTFN+".bak")
def test_remove(self):
self.assertRaises(WindowsError, os.remove, test_support.TESTFN)
def test_chdir(self):
self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)
def test_mkdir(self):
self.assertRaises(WindowsError, os.chdir, test_support.TESTFN)
def test_utime(self):
self.assertRaises(WindowsError, os.utime, test_support.TESTFN, None)
def test_access(self):
self.assertRaises(WindowsError, os.utime, test_support.TESTFN, 0)
def test_chmod(self):
self.assertRaises(WindowsError, os.utime, test_support.TESTFN, 0)
class TestInvalidFD(unittest.TestCase):
singles = ["fchdir", "fdopen", "dup", "fdatasync", "fstat",
"fstatvfs", "fsync", "tcgetpgrp", "ttyname"]
#singles.append("close")
    # We omit close because it doesn't raise an exception on some platforms.
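    # Dynamically create one test_<name> method per single-fd function listed
    # above; each generated test calls check() with a known-bad descriptor.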
def get_single(f):
def helper(self):
if hasattr(os, f):
self.check(getattr(os, f))
return helper
for f in singles:
locals()["test_"+f] = get_single(f)
def check(self, f, *args):
self.assertRaises(OSError, f, test_support.make_bad_fd(), *args)
def test_isatty(self):
if hasattr(os, "isatty"):
self.assertEqual(os.isatty(test_support.make_bad_fd()), False)
def test_closerange(self):
if hasattr(os, "closerange"):
fd = test_support.make_bad_fd()
self.assertEqual(os.closerange(fd, fd + 10), None)
def test_dup2(self):
if hasattr(os, "dup2"):
self.check(os.dup2, 20)
def test_fchmod(self):
if hasattr(os, "fchmod"):
self.check(os.fchmod, 0)
def test_fchown(self):
if hasattr(os, "fchown"):
self.check(os.fchown, -1, -1)
def test_fpathconf(self):
if hasattr(os, "fpathconf"):
self.check(os.fpathconf, "PC_NAME_MAX")
#this is a weird one, it raises IOError unlike the others
def test_ftruncate(self):
if hasattr(os, "ftruncate"):
self.assertRaises(IOError, os.ftruncate, test_support.make_bad_fd(),
0)
def test_lseek(self):
if hasattr(os, "lseek"):
self.check(os.lseek, 0, 0)
def test_read(self):
if hasattr(os, "read"):
self.check(os.read, 1)
def test_tcsetpgrpt(self):
if hasattr(os, "tcsetpgrp"):
self.check(os.tcsetpgrp, 0)
def test_write(self):
if hasattr(os, "write"):
self.check(os.write, " ")
if sys.platform != 'win32':
class Win32ErrorTests(unittest.TestCase):
pass
class PosixUidGidTests(unittest.TestCase):
if hasattr(os, 'setuid'):
def test_setuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setuid, 0)
self.assertRaises(OverflowError, os.setuid, 1<<32)
if hasattr(os, 'setgid'):
def test_setgid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setgid, 0)
self.assertRaises(OverflowError, os.setgid, 1<<32)
if hasattr(os, 'seteuid'):
def test_seteuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.seteuid, 0)
self.assertRaises(OverflowError, os.seteuid, 1<<32)
if hasattr(os, 'setegid'):
def test_setegid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setegid, 0)
self.assertRaises(OverflowError, os.setegid, 1<<32)
if hasattr(os, 'setreuid'):
def test_setreuid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setreuid, 0, 0)
self.assertRaises(OverflowError, os.setreuid, 1<<32, 0)
self.assertRaises(OverflowError, os.setreuid, 0, 1<<32)
if hasattr(os, 'setregid'):
def test_setregid(self):
if os.getuid() != 0:
self.assertRaises(os.error, os.setregid, 0, 0)
self.assertRaises(OverflowError, os.setregid, 1<<32, 0)
self.assertRaises(OverflowError, os.setregid, 0, 1<<32)
else:
class PosixUidGidTests(unittest.TestCase):
pass
def test_main():
test_support.run_unittest(
FileTests,
TemporaryFileTests,
StatAttributeTests,
EnvironTests,
WalkTests,
MakedirTests,
DevNullTests,
URandomTests,
Win32ErrorTests,
TestInvalidFD,
PosixUidGidTests
)
if __name__ == "__main__":
test_main()
| apache-2.0 |
savoca/h811 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
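#
# Typical invocation (illustrative sketch; the bundled
# 'perf script record netdev-times' helper can also do the record step):
#   perf record -e 'net:*' -e 'irq:*' -e 'skb:*' -e 'napi:napi_poll' -a -- sleep 10
#   perf script -s netdev-times.py tx rx dev=eth0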
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display the process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
briancurtin/pyrax | samples/cloudfiles/temporary_url.py | 13 | 2203 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c)2012 Rackspace US, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import requests
import time
import pyrax
import pyrax.exceptions as exc
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.rackspace_cloud_credentials")
pyrax.set_credential_file(creds_file)
cf = pyrax.cloudfiles
cont_name = pyrax.utils.random_ascii(8)
cont = cf.create_container(cont_name)
oname = pyrax.utils.random_ascii(8)
ipsum = """Import integration functools test dunder object explicit. Method
integration mercurial unit import. Future integration decorator pypy method
tuple unit pycon. Django raspberrypi mercurial 2to3 cython scipy. Cython
raspberrypi exception pypy object. Cython integration functools 2to3 object.
Future raspberrypi exception 2to3. Dunder integration community goat import
jinja exception science. Kwargs integration diversity 2to3 dunder future
functools. Import integration itertools 2to3 cython pycon unit tuple."""
print("Creating an object...")
obj = cont.store_object(oname, ipsum)
print("Getting the TempURL...")
# Get the existing TempURL key
curr_key = cf.get_temp_url_key()
if not curr_key:
# Create one.
cf.set_temp_url_key()
# Create the Temporary URL
temp_url = obj.get_temp_url(seconds=60)
print("Temporary URL")
print(temp_url)
print()
# Now try downloading it
print("Downloading the TempURL...")
resp = requests.get(temp_url)
content = resp.content
print("Downloaded content == stored content: ", content == ipsum
)
# Clean up
cf.set_temp_url_key(curr_key)
cont.delete(True)
| apache-2.0 |
mdenker/elephant | elephant/neo_tools.py | 4 | 6088 | # -*- coding: utf-8 -*-
"""
Tools to manipulate Neo objects.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function
from itertools import chain
from neo.core.container import unique_objs
def extract_neo_attrs(obj, parents=True, child_first=True,
skip_array=False, skip_none=False):
"""Given a neo object, return a dictionary of attributes and annotations.
Parameters
----------
obj : neo object
parents : bool, optional
Also include attributes and annotations from parent neo
objects (if any).
child_first : bool, optional
If True (default True), values of child attributes are used
over parent attributes in the event of a name conflict.
If False, parent attributes are used.
This parameter does nothing if `parents` is False.
skip_array : bool, optional
If True (default False), skip attributes that store non-scalar
array values.
skip_none : bool, optional
If True (default False), skip annotations and attributes that
have a value of `None`.
Returns
-------
dict
A dictionary where the keys are annotations or attribute names and
the values are the corresponding annotation or attribute value.
"""
attrs = obj.annotations.copy()
for attr in obj._necessary_attrs + obj._recommended_attrs:
if skip_array and len(attr) >= 3 and attr[2]:
continue
attr = attr[0]
if attr == getattr(obj, '_quantity_attr', None):
continue
attrs[attr] = getattr(obj, attr, None)
if skip_none:
for attr, value in attrs.copy().items():
if value is None:
del attrs[attr]
if not parents:
return attrs
for parent in getattr(obj, 'parents', []):
if parent is None:
continue
newattr = extract_neo_attrs(parent, parents=True,
child_first=child_first,
skip_array=skip_array,
skip_none=skip_none)
if child_first:
newattr.update(attrs)
attrs = newattr
else:
attrs.update(newattr)
return attrs
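# Editor's note: a minimal, hypothetical usage sketch, not part of the original
# module. It assumes `neo` and `quantities` are installed; the object and names
# below are illustrative only.
def _example_extract_neo_attrs():  # pragma: no cover - illustrative only
    import neo
    import quantities as pq
    st = neo.SpikeTrain([1.0, 2.0, 3.0] * pq.s, t_stop=10.0 * pq.s, name='unit-1')
    # Scalar attributes and annotations of the spike train itself; array-valued
    # attributes (e.g. the spike times) are skipped.
    return extract_neo_attrs(st, parents=False, skip_array=True)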
def _get_all_objs(container, classname):
"""Get all `neo` objects of a given type from a container.
The objects can be any list, dict, or other iterable or mapping containing
neo objects of a particular class, as well as any neo object that can hold
the object.
Objects are searched recursively, so the objects can be nested (such as a
list of blocks).
Parameters
----------
container : list, tuple, iterable, dict, neo container
The container for the neo objects.
classname : str
The name of the class, with proper capitalization
(so `SpikeTrain`, not `Spiketrain` or `spiketrain`)
Returns
-------
list
A list of unique `neo` objects
"""
if container.__class__.__name__ == classname:
return [container]
classholder = classname.lower() + 's'
if hasattr(container, classholder):
vals = getattr(container, classholder)
elif hasattr(container, 'list_children_by_class'):
vals = container.list_children_by_class(classname)
elif hasattr(container, 'values') and not hasattr(container, 'ndim'):
vals = container.values()
elif hasattr(container, '__iter__') and not hasattr(container, 'ndim'):
vals = container
else:
raise ValueError('Cannot handle object of type %s' % type(container))
res = list(chain.from_iterable(_get_all_objs(obj, classname)
for obj in vals))
return unique_objs(res)
def get_all_spiketrains(container):
"""Get all `neo.Spiketrain` objects from a container.
The objects can be any list, dict, or other iterable or mapping containing
spiketrains, as well as any neo object that can hold spiketrains:
`neo.Block`, `neo.ChannelIndex`, `neo.Unit`, and `neo.Segment`.
Containers are searched recursively, so the objects can be nested
(such as a list of blocks).
Parameters
----------
container : list, tuple, iterable, dict,
neo Block, neo Segment, neo Unit, neo ChannelIndex
The container for the spiketrains.
Returns
-------
list
A list of the unique `neo.SpikeTrain` objects in `container`.
"""
return _get_all_objs(container, 'SpikeTrain')
def get_all_events(container):
"""Get all `neo.Event` objects from a container.
The objects can be any list, dict, or other iterable or mapping containing
events, as well as any neo object that can hold events:
`neo.Block` and `neo.Segment`.
Containers are searched recursively, so the objects can be nested
(such as a list of blocks).
Parameters
----------
container : list, tuple, iterable, dict, neo Block, neo Segment
The container for the events.
Returns
-------
list
A list of the unique `neo.Event` objects in `container`.
"""
return _get_all_objs(container, 'Event')
def get_all_epochs(container):
"""Get all `neo.Epoch` objects from a container.
The objects can be any list, dict, or other iterable or mapping containing
epochs, as well as any neo object that can hold epochs:
`neo.Block` and `neo.Segment`.
Containers are searched recursively, so the objects can be nested
(such as a list of blocks).
Parameters
----------
container : list, tuple, iterable, dict, neo Block, neo Segment
The container for the epochs.
Returns
-------
list
A list of the unique `neo.Epoch` objects in `container`.
"""
return _get_all_objs(container, 'Epoch')
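# Editor's note: a hypothetical sketch, not part of the original module, showing the
# recursive container search described above: a spiketrain nested inside a segment
# inside a block is still found when only a plain list of blocks is passed in.
def _example_get_all_spiketrains():  # pragma: no cover - illustrative only
    import neo
    import quantities as pq
    block = neo.Block()
    segment = neo.Segment()
    segment.spiketrains.append(neo.SpikeTrain([0.5, 1.5] * pq.s, t_stop=2.0 * pq.s))
    block.segments.append(segment)
    return get_all_spiketrains([block])  # a list holding the single SpikeTrain above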
| bsd-3-clause |
philanthropy-u/edx-platform | common/djangoapps/philu_commands/management/commands/sync_users_with_mailchimp.py | 1 | 5558 | """
A command to collect user data and sync it with the mailchimp learner's list
"""
from logging import getLogger
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db import connection
from lms.djangoapps.certificates import api as certificate_api
from lms.djangoapps.onboarding.models import FocusArea, OrgSector
from mailchimp_pipeline.client import ChimpClient
from mailchimp_pipeline.helpers import get_enrollements_course_short_ids, get_user_active_enrollements
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = getLogger(__name__)
class Command(BaseCommand):
"""
    A command to collect user data and sync it with the mailchimp learner's list
"""
help = """
    One-time addition of already existing users into the mailchimp learner's list
example:
manage.py sync_users_with_mailchimp
"""
def send_user_to_mailchimp(self, client, users):
client.add_list_members_in_batch(settings.MAILCHIMP_LEARNERS_LIST_ID, {
"members": users,
"update_existing": True
})
def get_users_data_to_send(self, users):
"""
Get users data to send to mailchimp
Args:
users (list): List of user objects
Returns:
list: list of dicts with users data
"""
users_set = []
focus_areas = FocusArea.get_map()
org_sectors = OrgSector.objects.get_map()
for user in users:
language = country = city = organization = org_type = work_area = ""
profile = extended_profile = None
try:
profile = user.profile
extended_profile = user.extended_profile
if profile.language:
language = profile.language
if profile.country:
country = profile.country.name.format()
if profile.city:
city = profile.city
if extended_profile.organization:
organization = extended_profile.organization.label
work_area = str(focus_areas.get(
extended_profile.organization.focus_area, ""
))
if extended_profile.organization.org_type:
org_type = org_sectors.get(
extended_profile.organization.org_type, ''
)
except Exception: # pylint: disable=broad-except
log.exception(
"User %s does not have related object profile or extended_profile.",
user.username
)
all_certs = []
try:
all_certs = certificate_api.get_certificates_for_user(user.username)
except Exception as ex: # pylint: disable=broad-except
log.exception(str(ex.args))
completed_course_keys = [cert.get('course_key', '') for cert in all_certs
if certificate_api.is_passing_status(cert['status'])]
completed_courses = CourseOverview.objects.filter(id__in=completed_course_keys)
try:
user_json = {
"email_address": user.email,
"status_if_new": "subscribed",
"merge_fields": {
"FULLNAME": user.get_full_name(),
"USERNAME": user.username,
"LANG": language,
"COUNTRY": country,
"CITY": city,
"DATEREGIS": str(user.date_joined.strftime("%m/%d/%Y")),
"LSOURCE": "",
"COMPLETES": ", ".join([course.display_name for course in completed_courses]),
"ENROLLS": get_user_active_enrollements(user.username),
"ENROLL_IDS": get_enrollements_course_short_ids(user.username),
"ORG": organization,
"ORGTYPE": org_type,
"WORKAREA": work_area,
}
}
except Exception as ex: # pylint: disable=broad-except
log.info("There was an error for user with email address as {}".format(user.email))
log.exception(str(ex.args))
continue
users_set.append(user_json)
return users_set
def handle(self, *args, **options):
batch_size = 500
cursor = connection.cursor()
cursor.execute('SET TRANSACTION ISOLATION LEVEL READ COMMITTED')
client = ChimpClient()
total_user_count = User.objects.all().count()
page_count = total_user_count / batch_size
counter = 0
        while counter <= page_count:  # compare by value; identity ("is not") on ints is unreliable
try:
page_start = counter * batch_size
page_end = page_start + batch_size
users = list(User.objects.all()[page_start:page_end])
log.info(User.objects.all()[page_start:page_end].query)
users_json = self.get_users_data_to_send(users)
self.send_user_to_mailchimp(client, users_json)
except Exception as ex: # pylint: disable=broad-except
log.info("There was an error in batch from {} to {}".format(page_start, page_end))
log.exception(str(ex.args))
counter += 1
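# Editor's note: a hypothetical helper, not part of the original command, spelling out
# the same pagination arithmetic with explicit ceiling division so a trailing partial
# batch is always counted; the name and signature are illustrative only.
def _example_page_ranges(total_count, batch_size=500):
    pages = (total_count + batch_size - 1) // batch_size  # ceiling division
    return [(page * batch_size, (page + 1) * batch_size) for page in range(pages)]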
| agpl-3.0 |
jabesq/home-assistant | tests/components/uptime/test_sensor.py | 8 | 4256 | """The tests for the uptime sensor platform."""
import unittest
from unittest.mock import patch
from datetime import timedelta
from homeassistant.util.async_ import run_coroutine_threadsafe
from homeassistant.setup import setup_component
from homeassistant.components.uptime.sensor import UptimeSensor
from tests.common import get_test_home_assistant
class TestUptimeSensor(unittest.TestCase):
"""Test the uptime sensor."""
def setUp(self):
"""Set up things to run when tests begin."""
self.hass = get_test_home_assistant()
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_uptime_min_config(self):
"""Test minimum uptime configuration."""
config = {
'sensor': {
'platform': 'uptime',
}
}
assert setup_component(self.hass, 'sensor', config)
def test_uptime_sensor_name_change(self):
"""Test uptime sensor with different name."""
config = {
'sensor': {
'platform': 'uptime',
'name': 'foobar',
}
}
assert setup_component(self.hass, 'sensor', config)
def test_uptime_sensor_config_hours(self):
"""Test uptime sensor with hours defined in config."""
config = {
'sensor': {
'platform': 'uptime',
'unit_of_measurement': 'hours',
}
}
assert setup_component(self.hass, 'sensor', config)
def test_uptime_sensor_config_minutes(self):
"""Test uptime sensor with minutes defined in config."""
config = {
'sensor': {
'platform': 'uptime',
'unit_of_measurement': 'minutes',
}
}
assert setup_component(self.hass, 'sensor', config)
def test_uptime_sensor_days_output(self):
"""Test uptime sensor output data."""
sensor = UptimeSensor('test', 'days')
assert sensor.unit_of_measurement == 'days'
new_time = sensor.initial + timedelta(days=1)
with patch('homeassistant.util.dt.now', return_value=new_time):
run_coroutine_threadsafe(
sensor.async_update(),
self.hass.loop
).result()
assert sensor.state == 1.00
new_time = sensor.initial + timedelta(days=111.499)
with patch('homeassistant.util.dt.now', return_value=new_time):
run_coroutine_threadsafe(
sensor.async_update(),
self.hass.loop
).result()
assert sensor.state == 111.50
def test_uptime_sensor_hours_output(self):
"""Test uptime sensor output data."""
sensor = UptimeSensor('test', 'hours')
assert sensor.unit_of_measurement == 'hours'
new_time = sensor.initial + timedelta(hours=16)
with patch('homeassistant.util.dt.now', return_value=new_time):
run_coroutine_threadsafe(
sensor.async_update(),
self.hass.loop
).result()
assert sensor.state == 16.00
new_time = sensor.initial + timedelta(hours=72.499)
with patch('homeassistant.util.dt.now', return_value=new_time):
run_coroutine_threadsafe(
sensor.async_update(),
self.hass.loop
).result()
assert sensor.state == 72.50
def test_uptime_sensor_minutes_output(self):
"""Test uptime sensor output data."""
sensor = UptimeSensor('test', 'minutes')
assert sensor.unit_of_measurement == 'minutes'
new_time = sensor.initial + timedelta(minutes=16)
with patch('homeassistant.util.dt.now', return_value=new_time):
run_coroutine_threadsafe(
sensor.async_update(),
self.hass.loop
).result()
assert sensor.state == 16.00
new_time = sensor.initial + timedelta(minutes=12.499)
with patch('homeassistant.util.dt.now', return_value=new_time):
run_coroutine_threadsafe(
sensor.async_update(),
self.hass.loop
).result()
assert sensor.state == 12.50
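# Editor's note: an illustrative aside, not part of the original tests. The expected
# values above follow from the sensor rounding its state to two decimal places, so a
# patched clock 111.499 days past `sensor.initial` reads back as 111.50.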
| apache-2.0 |
ribeiro-ucl/viewflow | tests/unit/flows.py | 2 | 2501 | from viewflow import flow, lock
from viewflow.base import Flow, this
from viewflow.contrib import celery
from . import tasks
from .models import TestProcess
from .signals import test_start_flow, test_done_flow_task
@flow.flow_view()
def perform_task(request, activation):
raise NotImplementedError
class SingleTaskFlow(Flow):
lock_impl = lock.cache_lock
start = flow.Start() \
.Next('task')
task = flow.View(perform_task)\
.Next('end')
end = flow.End()
class AllTaskFlow(Flow):
lock_impl = lock.cache_lock
start = flow.Start().Next(this.view)
view = flow.View(perform_task).Next(this.job)
job = celery.Job(tasks.dummy_job).Next(this.iff)
iff = flow.If(lambda act: True).OnTrue(this.switch).OnFalse(this.switch)
switch = flow.Switch().Default(this.split)
split = flow.Split().Always(this.join)
join = flow.Join().Next(this.first)
first = flow.First().Of(this.end)
end = flow.End()
class FailedJobFlow(Flow):
"""
Test that failed job gate stored task in error state
"""
lock_impl = lock.cache_lock
start = flow.Start().Next(this.job)
job = celery.Job(tasks.dummy_job).Next(this.iff)
iff = flow.If(lambda p: 2/(1-1)).OnTrue(this.end).OnFalse(this.end)
end = flow.End()
class FailedGateFlow(Flow):
"""
Test that failed If gate not reflects on finished job
"""
lock_impl = lock.cache_lock
start = flow.Start().Next(this.job)
job = celery.Job(tasks.dummy_job).Next(this.iff)
iff = flow.If(lambda p: 2/0).OnTrue(this.end).OnFalse(this.end)
end = flow.End()
class AutoPermissionsFlow(Flow):
process_cls = TestProcess
start = flow.Start() \
.Permission(auto_create=True) \
.Next(this.end)
end = flow.End()
class SignalFlow(Flow):
process_cls = TestProcess
start = flow.StartSignal(test_start_flow, tasks.start_process) \
.Next(this.task)
task = flow.Signal(test_done_flow_task, tasks.do_signal_task) \
.Next(this.end)
end = flow.End()
class FunctionFlow(Flow):
process_cls = TestProcess
start = flow.StartFunction(tasks.start_process) \
.Next(this.task1)
task1 = flow.Handler(tasks.do_handler_task) \
.Next(this.task2)
task2 = flow.Function(tasks.do_func_task) \
.Next(this.end)
end = flow.End()
class DefaultProcessFunctionFlow(Flow):
start = flow.StartFunction(tasks.start_process) \
.Next(this.end)
end = flow.End()
| agpl-3.0 |
Jionglun/w17test_2 | static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py | 733 | 66279 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
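#
# Editor's note: an illustrative aside, not part of the original module. Because
# addLevelName() records the mapping in both directions, a custom level (the TRACE
# name and value below are arbitrary examples) resolves either way:
#
#     addLevelName(5, 'TRACE')
#     getLevelName(5)        # -> 'TRACE'
#     getLevelName('TRACE')  # -> 5
#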
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary,
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
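#
# Editor's note: an illustrative aside, not part of the original module. The same
# record fields can be referenced in each of the supported styles:
#
#     '%' : '%(levelname)s:%(name)s:%(message)s'
#     '{' : '{levelname}:{name}:{message}'
#     '$' : '${levelname}:${name}:${message}'
#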
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%s(message)" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
        .. versionchanged:: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
time (as determined by a call to usesTime(), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
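#
# Editor's note: a hypothetical construction sketch, not part of the original module.
# A formatter using {}-style formatting and an explicit date format might look like:
#
#     fmt = Formatter('{asctime} {levelname:8s} {name}: {message}',
#                     datefmt='%H:%M:%S', style='{')
#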
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
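#
# Editor's note: an illustrative aside, not part of the original module. With the
# hierarchy rule described above, Filter('A.B') passes records from loggers 'A.B'
# and 'A.B.C' but rejects records from 'A.BB' or 'B.A.B'.
#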
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
        .. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(nm)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
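    # Illustrative sketch (not part of this module): a subclass overriding
    # process() as described above. The adapter name and the 'request_id' key
    # are hypothetical.
    #
    #     class RequestAdapter(LoggerAdapter):
    #         def process(self, msg, kwargs):
    #             kwargs["extra"] = self.extra
    #             return '[request %s] %s' % (self.extra['request_id'], msg), kwargs
    #
    #     adapter = RequestAdapter(getLogger(__name__), {'request_id': 'abc123'})
    #     adapter.info('handling request')   # -> "[request abc123] handling request"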
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
       ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
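# Illustrative sketch (not part of this module): one-shot configuration via
# basicConfig(). The file name and format string below are hypothetical.
#
#     import logging
#     logging.basicConfig(filename='app.log', filemode='w',
#                         format='%(asctime)s %(levelname)s %(name)s: %(message)s',
#                         level=logging.DEBUG)
#     logging.debug('root logger now writes records like this one to app.log')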
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
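# Illustrative sketch (not part of this module): the library-author pattern the
# NullHandler docstring describes. The package name is hypothetical.
#
#     import logging
#     logging.getLogger('mylibrary').addHandler(logging.NullHandler())
#     # users of 'mylibrary' no longer see the one-off "No handlers could be
#     # found" warning unless they configure logging themselves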
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
Implementation of showwarnings which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
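# Illustrative sketch (not part of this module): routing warnings through the
# "py.warnings" logger.
#
#     import logging, warnings
#     logging.basicConfig(level=logging.WARNING)
#     logging.captureWarnings(True)
#     warnings.warn('deprecated setting')    # emitted by the 'py.warnings' logger
#     logging.captureWarnings(False)         # restores warnings.showwarning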
| gpl-3.0 |
ZephyrSurfer/dolphin | Tools/check-includes.py | 148 | 3063 | #! /usr/bin/env python
"""
check-includes.py <file...>
Checks if the includes are sorted properly and follow the "system headers
before local headers" rule.
Ignores what is in #if blocks to avoid false negatives.
"""
import re
import sys
def exclude_if_blocks(lines):
'''Removes lines from #if ... #endif blocks.'''
level = 0
for l in lines:
if l.startswith('#if'):
level += 1
elif l.startswith('#endif'):
level -= 1
elif level == 0:
yield l
def filter_includes(lines):
'''Removes lines that are not #include and keeps only the file part.'''
for l in lines:
if l.startswith('#include'):
if 'NOLINT' not in l:
yield l.split(' ')[1]
class IncludeFileSorter(object):
def __init__(self, path):
self.path = path
def __lt__(self, other):
'''Sorting function for include files.
* System headers go before local headers (check the first character -
if it's different, then the one starting with " is the 'larger').
* Then, iterate on all the path components:
* If they are equal, try to continue to the next path component.
* If not, return whether the path component are smaller/larger.
        * Paths with fewer components should go first, so after iterating, check
whether one path still has some / in it.
'''
a, b = self.path, other.path
if a[0] != b[0]:
return False if a[0] == '"' else True
a, b = a[1:-1].lower(), b[1:-1].lower()
while '/' in a and '/' in b:
ca, a = a.split('/', 1)
cb, b = b.split('/', 1)
if ca != cb:
return ca < cb
if '/' in a:
return False
elif '/' in b:
return True
else:
return a < b
def __eq__(self, other):
return self.path.lower() == other.path.lower()
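# Illustrative sketch (not part of this script): the ordering IncludeFileSorter
# produces -- system headers first, then local headers, shallower paths before
# deeper ones. The header names are hypothetical.
#
#     sort_includes(['"Core/HW/Memmap.h"', '<vector>', '"Common/Log.h"'])
#     # -> ['<vector>', '"Common/Log.h"', '"Core/HW/Memmap.h"']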
def sort_includes(includes):
return sorted(includes, key=IncludeFileSorter)
def show_differences(bad, good):
bad = [' Current'] + bad
good = [' Should be'] + good
longest = max(len(i) for i in bad)
padded = [i + ' ' * (longest + 4 - len(i)) for i in bad]
return '\n'.join('%s%s' % t for t in zip(padded, good))
def check_file(path):
print('Checking %s' % path)
try:
try:
data = open(path, encoding='utf-8').read()
except TypeError: # py2
data = open(path).read().decode('utf-8')
except UnicodeDecodeError:
sys.stderr.write('%s: bad UTF-8 data\n' % path)
return
lines = (l.strip() for l in data.split('\n'))
lines = exclude_if_blocks(lines)
includes = list(filter_includes(lines))
sorted_includes = sort_includes(includes)
if includes != sorted_includes:
sys.stderr.write('%s: includes are incorrect\n' % path)
sys.stderr.write(show_differences(includes, sorted_includes) + '\n')
if __name__ == '__main__':
for path in sys.argv[1:]:
check_file(path)
| gpl-2.0 |
kevaughan/project3 | server/lib/werkzeug/debug/tbtools.py | 311 | 16785 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'^(.*?)$(?m)')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does not by
accident trigger a request to /favicon.ico which might change the application
state. -->
<link rel="shortcut icon" href="?__debugger__=yes&cmd=resource&f=console.png">
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
SECRET = "%(secret)s";
</script>
</head>
<body>
<div class="debugger">
'''
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
</body>
</html>
'''
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''
CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''
SOURCE_TABLE_HTML = u'<table class=source>%s</table>'
SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret):
return CONSOLE_HTML % {
'evalex': 'true',
'console': 'true',
'title': 'Console',
'secret': secret,
'traceback_id': -1
}
def get_current_traceback(ignore_system_exceptions=False,
show_hidden_frames=False, skip=0):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
raise
for x in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
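# Illustrative sketch (not part of this module): typical use inside an except
# block; the WSGI callable below is hypothetical.
#
#     try:
#         app(environ, start_response)
#     except Exception:
#         tb = get_current_traceback(ignore_system_exceptions=True)
#         tb.log(sys.stderr)                    # plain-text traceback
#         html = tb.render_full(evalex=False)   # full debugger page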
class Line(object):
"""Helper for the source renderer."""
__slots__ = ('lineno', 'code', 'in_frame', 'current')
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
def classes(self):
rv = ['line']
if self.in_frame:
rv.append('in-frame')
if self.current:
rv.append('current')
return rv
classes = property(classes)
def render(self):
return SOURCE_LINE_HTML % {
'classes': u' '.join(self.classes),
'lineno': self.lineno,
'code': escape(self.code)
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
if not isinstance(exc_type, str):
exception_type = exc_type.__name__
if exc_type.__module__ not in ('__builtin__', 'exceptions'):
exception_type = exc_type.__module__ + '.' + exception_type
else:
exception_type = exc_type
self.exception_type = exception_type
# we only add frames to the list that are not hidden. This follows
        # the magic variables as defined by paste.exceptions.collector
self.frames = []
while tb:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
if not self.frames:
return
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ('before', 'before_and_this'):
new_frames = []
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == 'codeop':
del self.frames[:]
        # if the last frame is missing something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
is_syntax_error = property(is_syntax_error)
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = ''.join(buf).strip()
return rv.decode('utf-8', 'replace') if PY2 else rv
exception = property(exception)
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u'\n'
if PY2:
            tb = tb.encode('utf-8', 'replace')
logfile.write(tb)
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps({
'description': 'Werkzeug Internal Server Error',
'public': False,
'files': {
'traceback.txt': {
'content': self.plaintext
}
}
}).encode('utf-8')
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen('https://api.github.com/gists', data=data)
resp = json.loads(rv.read().decode('utf-8'))
rv.close()
return {
'url': resp['html_url'],
'id': resp['id']
}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = u'Syntax Error'
else:
title = u'Traceback <em>(most recent call last)</em>:'
for frame in self.frames:
frames.append(u'<li%s>%s' % (
frame.info and u' title="%s"' % escape(frame.info) or u'',
frame.render()
))
if self.is_syntax_error:
description_wrapper = u'<pre class=syntaxerror>%s</pre>'
else:
description_wrapper = u'<blockquote>%s</blockquote>'
return SUMMARY_HTML % {
'classes': u' '.join(classes),
'title': title and u'<h3>%s</h3>' % title or u'',
'frames': u'\n'.join(frames),
'description': description_wrapper % escape(self.exception)
}
def render_full(self, evalex=False, secret=None):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'title': exc,
'exception': exc,
'exception_type': escape(self.exception_type),
'summary': self.render_summary(include_title=False),
'plaintext': self.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
'traceback_id': self.id,
'secret': secret
}
def generate_plaintext_traceback(self):
"""Like the plaintext attribute but returns a generator"""
yield u'Traceback (most recent call last):'
for frame in self.frames:
yield u' File "%s", line %s, in %s' % (
frame.filename,
frame.lineno,
frame.function_name
)
yield u' ' + frame.current_line.strip()
yield self.exception
def plaintext(self):
return u'\n'.join(self.generate_plaintext_traceback())
plaintext = cached_property(plaintext)
id = property(lambda x: id(x))
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in ('.pyo', '.pyc'):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = fn
self.module = self.globals.get('__name__')
self.loader = self.globals.get('__loader__')
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get('__traceback_hide__', False)
info = self.locals.get('__traceback_info__')
if info is not None:
try:
info = text_type(info)
except UnicodeError:
info = str(info).decode('utf-8', 'replace')
self.info = info
def render(self):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
'id': self.id,
'filename': escape(self.filename),
'lineno': self.lineno,
'function_name': escape(self.function_name),
'current_line': escape(self.current_line.strip())
}
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + '\n' for x
in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno:lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def render_source(self):
"""Render the sourcecode."""
return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in
self.get_annotated_lines())
def eval(self, code, mode='single'):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, unicode):
code = UTF8_COOKIE + code.encode('utf-8')
code = compile(code, '<interactive>', mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, 'get_source'):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, 'get_source_by_code'):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
f = open(self.filename)
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
# yes. it should be ascii, but we don't want to reject too many
# characters in the debugger if something breaks
charset = 'utf-8'
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
            # look for a PEP 263 coding cookie in the first couple of lines
            for idx, match in enumerate(_line_re.finditer(source)):
                match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
try:
codecs.lookup(charset)
except LookupError:
charset = 'utf-8'
return source.decode(charset, 'replace').splitlines()
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u''
@cached_property
def console(self):
return Console(self.globals, self.locals)
id = property(lambda x: id(x))
| apache-2.0 |
TNosredna/CouchPotatoServer | couchpotato/core/notifications/notifymywp/__init__.py | 16 | 1303 | from .main import NotifyMyWP
def start():
return NotifyMyWP()
config = [{
'name': 'notifymywp',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'notifymywp',
'label': 'Windows Phone',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'api_key',
                    'description': 'Multiple keys separated by a comma. Maximum of 5.'
},
{
'name': 'dev_key',
'advanced': True,
},
{
'name': 'priority',
'default': 0,
'type': 'dropdown',
'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)],
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send message when movie is snatched.',
},
],
}
],
}]
| gpl-3.0 |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar argument.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodiogram
csd - the cross spectral density using average periodiogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodiogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
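# Illustrative sketch (not part of this module): a minimal pylab session using
# the procedural interface documented above.
#
#     from pylab import *
#     x = linspace(0, 2 * pi, 200)
#     plot(x, sin(x), label='sin(x)')
#     xlabel('x'); ylabel('sin(x)'); legend(); grid(True)
#     show()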
| gpl-3.0 |
TribeMedia/sky_engine | tools/android/mempressure.py | 36 | 3713 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import logging
import optparse
import os
import sys
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir,
'build',
'android')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from pylib import flag_changer
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.device import intent
# Browser Constants
DEFAULT_BROWSER = 'chrome'
# Action Constants
ACTION_PACKAGE = 'org.chromium.base'
ACTION_TRIM = {
'moderate' : ACTION_PACKAGE + '.ACTION_TRIM_MEMORY_MODERATE',
'critical' : ACTION_PACKAGE + '.ACTION_TRIM_MEMORY_RUNNING_CRITICAL',
'complete' : ACTION_PACKAGE + '.ACTION_TRIM_MEMORY'
}
ACTION_LOW = ACTION_PACKAGE + '.ACTION_LOW_MEMORY'
# Command Line Constants
ENABLE_TEST_INTENTS_FLAG = '--enable-test-intents'
def main(argv):
option_parser = optparse.OptionParser()
option_parser.add_option('-l',
'--low',
help='Simulate Activity#onLowMemory()',
action='store_true')
option_parser.add_option('-t',
'--trim',
help=('Simulate Activity#onTrimMemory(...) with ' +
', '.join(ACTION_TRIM.keys())),
type='string')
option_parser.add_option('-b',
'--browser',
default=DEFAULT_BROWSER,
help=('Which browser to use. One of ' +
', '.join(constants.PACKAGE_INFO.keys()) +
' [default: %default]'),
type='string')
(options, args) = option_parser.parse_args(argv)
if len(args) > 1:
print 'Unknown argument: ', args[1:]
option_parser.print_help()
sys.exit(1)
if options.low and options.trim:
option_parser.error('options --low and --trim are mutually exclusive')
if not options.low and not options.trim:
option_parser.print_help()
sys.exit(1)
action = None
if options.low:
action = ACTION_LOW
elif options.trim in ACTION_TRIM.keys():
action = ACTION_TRIM[options.trim]
if action is None:
option_parser.print_help()
sys.exit(1)
if not options.browser in constants.PACKAGE_INFO.keys():
option_parser.error('Unknown browser option ' + options.browser)
package_info = constants.PACKAGE_INFO[options.browser]
package = package_info.package
activity = package_info.activity
devices = device_utils.DeviceUtils.HealthyDevices()
if not devices:
raise device_errors.NoDevicesError()
elif len(devices) > 1:
logging.warning('Multiple devices attached. Using %s.', str(devices[0]))
device = devices[0]
try:
device.EnableRoot()
except device_errors.CommandFailedError as e:
# Try to change the flags and start the activity anyway.
# TODO(jbudorick) Handle this exception appropriately after interface
# conversions are finished.
logging.error(str(e))
flags = flag_changer.FlagChanger(device, package_info.cmdline_file)
if ENABLE_TEST_INTENTS_FLAG not in flags.Get():
flags.AddFlags([ENABLE_TEST_INTENTS_FLAG])
device.StartActivity(intent.Intent(package=package, activity=activity,
action=action))
if __name__ == '__main__':
sys.exit(main(sys.argv))
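# Illustrative usage (not part of this script), assuming a rooted device is
# attached:
#
#   python mempressure.py --low                      # simulate Activity#onLowMemory()
#   python mempressure.py --trim moderate            # simulate onTrimMemory(moderate)
#   python mempressure.py --trim critical -b chrome  # pick the browser explicitly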
| bsd-3-clause |
alexbelyeu/three.js | utils/converters/obj/convert_obj_three.py | 160 | 48659 | """Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", createScene );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", createScene );
function createScene( geometry, materials ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial( materials ) );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model is now all files in this folder (OBJ, MTL, number of images)
- this converter assumes all files staying in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRANSPARENCY = "normal" # normal invert
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in xrange(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath.replace("\\", "/"))
return texture_file
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "newmtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Material start
# newmtl identifier
if chunks[0] == "newmtl":
if len(chunks) > 1:
identifier = chunks[1]
else:
identifier = ""
if not identifier in materials:
materials[identifier] = {}
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Ambient texture
# map_Ka texture_ambient.jpg
if chunks[0] == "map_Ka" and len(chunks) == 2:
materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["transparent"] = True
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Ambient color
# Ka 1.000 1.000 1.000
if chunks[0] == "Ka" and len(chunks) == 4:
materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Transparency
# Tr 0.9 or d 0.9
if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
materials[identifier]["transparent"] = True
if TRANSPARENCY == "invert":
materials[identifier]["transparency"] = 1.0 - float(chunks[1])
else:
materials[identifier]["transparency"] = float(chunks[1])
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
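# Illustrative examples (not part of the converter): what parse_vertex returns
# for the formats listed in its docstring.
#
#     parse_vertex("3") # -> {'v': 3, 't': 0, 'n': 0}
#     parse_vertex("3/7") # -> {'v': 3, 't': 7, 'n': 0}
#     parse_vertex("3/7/2") # -> {'v': 3, 't': 7, 'n': 2}
#     parse_vertex("3//2") # -> {'v': 3, 't': 0, 'n': 2}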
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
material = ""
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (i.e. "usemtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl":
if len(chunks) > 1:
material = chunks[1]
else:
material = ""
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
# Precompute vert / normal / uv lists
# for negative index lookup
vertlen = len(vertices) + 1
normlen = len(normals) + 1
uvlen = len(uvs) + 1
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
if vertex['v'] < 0:
vertex['v'] += vertlen
vertex_index.append(vertex['v'])
if vertex['t']:
if vertex['t'] < 0:
vertex['t'] += uvlen
uv_index.append(vertex['t'])
if vertex['n']:
if vertex['n'] < 0:
vertex['n'] += normlen
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
# must clamp in case on polygons bigger than quads
for i in xrange(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in xrange(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in xrange(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
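# Illustrative example (not part of the converter): hexcolor([1.0, 0.5, 0.0])
# packs to (255 << 16) + (127 << 8) + 0 = 0xff7f00.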
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print "adding [%s] with %d vertices" % (name, n_morph_vertices)
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
elif n_faces != n_morph_faces:
print "WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces)
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print "adding [%s] with %d face colors" % (name, len(morphFaceColors))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have well defined ordering.
First N colors are hardcoded, then colors are random
(must seed random number generator with deterministic value
before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
# this index is identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
    Potential edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# there is no specified MTL / MTL loading failed,
# or if there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print "Couldn't find [%s]" % fname
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
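# The eight buckets returned by sort_faces() map one-to-one onto the numbered
# sections that convert_binary() writes after the header (flat/smooth triangles
# and quads, each with and without UVs).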
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as Python module and call this method.
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
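# Illustrative helper (not part of the original script): add_padding() above keeps
# each section of the binary buffer aligned to 4 bytes; this computes how many
# zero bytes it appends after a payload of n bytes.
def _padding_needed(n):
    return (4 - n % 4) % 4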
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
# signature
signature = struct.pack('<12s', 'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write("".join(buffer))
out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-d", "--dissolve"):
if a in ("normal", "invert"):
TRANSPARENCY = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Converting [%s] into [%s] ..." % (infile, outfile)
if morphfiles:
print "Morphs [%s]" % morphfiles
if colorfiles:
print "Colors [%s]" % colorfiles
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
| mit |
jtackaberry/stagehand | external/metadata/video/ogm.py | 1 | 11299 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# ogm.py - ogm/ogg file parser
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer
#
# First Edition: Thomas Schueppel <[email protected]>
# Maintainer: Dirk Meyer <[email protected]>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = ['Parser']
# python imports
import struct
import re
import stat
import os
import logging
# import kaa.metadata.video core
from . import core
# get logging object
log = logging.getLogger('metadata')
PACKET_TYPE_HEADER = 0x01
PACKED_TYPE_METADATA = 0x03
PACKED_TYPE_SETUP = 0x05
PACKET_TYPE_BITS = 0x07
PACKET_IS_SYNCPOINT = 0x08
#VORBIS_VIDEO_PACKET_INFO = 'video'
STREAM_HEADER_VIDEO = '<4sIQQIIHII'
STREAM_HEADER_AUDIO = '<4sIQQIIHHHI'
VORBISCOMMENT = { 'TITLE': 'title',
'ALBUM': 'album',
'ARTIST': 'artist',
'COMMENT': 'comment',
'ENCODER': 'encoder',
'TRACKNUMBER': 'trackno',
'LANGUAGE': 'language',
'GENRE': 'genre',
}
# FIXME: check VORBISCOMMENT date and convert to timestamp
# Deactivated tag: 'DATE': 'date',
MAXITERATIONS = 30
class Ogm(core.AVContainer):
table_mapping = { 'VORBISCOMMENT' : VORBISCOMMENT }
def __init__(self, file):
core.AVContainer.__init__(self)
self.samplerate = 1
self.all_streams = [] # used to add meta data to streams
self.all_header = []
for i in range(MAXITERATIONS):
granule, nextlen = self._parseOGGS(file)
if granule == None:
if i == 0:
# oops, bad file
raise core.ParseError()
break
elif granule > 0:
# ok, file started
break
# seek to the end of the stream, to avoid scanning the whole file
if (os.stat(file.name)[stat.ST_SIZE] > 50000):
file.seek(os.stat(file.name)[stat.ST_SIZE]-49000)
# read the rest of the file into a buffer
h = file.read()
# find last OggS to get length info
if len(h) > 200:
idx = h.find(b'OggS')
pos = -49000 + idx
if idx:
file.seek(os.stat(file.name)[stat.ST_SIZE] + pos)
while 1:
granule, nextlen = self._parseOGGS(file)
if not nextlen:
break
# Copy metadata to the streams
if len(self.all_header) == len(self.all_streams):
for i in range(len(self.all_header)):
# get meta info
for key in list(self.all_streams[i].keys()):
if key in self.all_header[i]:
self.all_streams[i][key] = self.all_header[i][key]
del self.all_header[i][key]
if key.upper() in self.all_header[i]:
asi = self.all_header[i][key.upper()]
self.all_streams[i][key] = asi
del self.all_header[i][key.upper()]
# Chapter parser
if 'CHAPTER01' in self.all_header[i] and \
not self.chapters:
while 1:
s = 'CHAPTER%02d' % (len(self.chapters) + 1)
if s in self.all_header[i] and \
s + 'NAME' in self.all_header[i]:
pos = self.all_header[i][s]
try:
pos = int(pos)
except ValueError:
new_pos = 0
for v in pos.split(':'):
new_pos = new_pos * 60 + float(v)
pos = int(new_pos)
c = self.all_header[i][s + 'NAME']
c = core.Chapter(c, pos)
del self.all_header[i][s + 'NAME']
del self.all_header[i][s]
self.chapters.append(c)
else:
break
# If there are no video streams in this ogg container, it
# must be an audio file. Raise an exception to cause the
# factory to fall back to audio.ogg.
if len(self.video) == 0:
raise core.ParseError
# Copy Metadata from tables into the main set of attributes
for header in self.all_header:
self._appendtable('VORBISCOMMENT', header)
def _parseOGGS(self,file):
h = file.read(27)
if len(h) == 0:
# Regular File end
return None, None
elif len(h) < 27:
log.debug("%d Bytes of Garbage found after End." % len(h))
return None, None
if h[:4] != "OggS":
log.debug("Invalid Ogg")
raise core.ParseError()
        version = h[4]
if version != 0:
log.debug("Unsupported OGG/OGM Version %d." % version)
return None, None
head = struct.unpack('<BQIIIB', h[5:])
headertype, granulepos, serial, pageseqno, checksum, \
pageSegCount = head
self.mime = 'application/ogm'
self.type = 'OGG Media'
tab = file.read(pageSegCount)
nextlen = 0
for i in range(len(tab)):
            nextlen += tab[i]
else:
h = file.read(1)
            packettype = h[0] & PACKET_TYPE_BITS
if packettype == PACKET_TYPE_HEADER:
h += file.read(nextlen-1)
self._parseHeader(h, granulepos)
elif packettype == PACKED_TYPE_METADATA:
h += file.read(nextlen-1)
self._parseMeta(h)
else:
file.seek(nextlen-1,1)
if len(self.all_streams) > serial:
stream = self.all_streams[serial]
if hasattr(stream, 'samplerate') and \
stream.samplerate:
stream.length = granulepos / stream.samplerate
elif hasattr(stream, 'bitrate') and \
stream.bitrate:
stream.length = granulepos / stream.bitrate
return granulepos, nextlen + 27 + pageSegCount
def _parseMeta(self,h):
        flags = h[0]
headerlen = len(h)
        if headerlen >= 7 and h[1:7] == b'vorbis':
header = {}
nextlen, self.encoder = self._extractHeaderString(h[7:])
numItems = struct.unpack('<I',h[7+nextlen:7+nextlen+4])[0]
start = 7+4+nextlen
for i in range(numItems):
(nextlen, s) = self._extractHeaderString(h[start:])
start += nextlen
if s:
a = re.split('=',s)
header[(a[0]).upper()]=a[1]
# Put Header fields into info fields
self.type = 'OGG Vorbis'
self.subtype = ''
self.all_header.append(header)
def _parseHeader(self,header,granule):
headerlen = len(header)
        flags = header[0]
        if headerlen >= 30 and header[1:7] == b'vorbis':
ai = core.AudioStream()
ai.version, ai.channels, ai.samplerate, bitrate_max, ai.bitrate, \
bitrate_min, blocksize, framing = \
struct.unpack('<IBIiiiBB',header[7:7+23])
ai.codec = 'Vorbis'
#ai.granule = granule
#ai.length = granule / ai.samplerate
self.audio.append(ai)
self.all_streams.append(ai)
        elif headerlen >= 7 and header[1:7] == b'theora':
# Theora Header
# XXX Finish Me
vi = core.VideoStream()
vi.codec = 'theora'
self.video.append(vi)
self.all_streams.append(vi)
elif headerlen >= 142 and \
            header[1:36] == b'Direct Show Samples embedded in Ogg':
# Old Directshow format
# XXX Finish Me
vi = core.VideoStream()
vi.codec = 'dshow'
self.video.append(vi)
self.all_streams.append(vi)
elif flags & PACKET_TYPE_BITS == PACKET_TYPE_HEADER and \
headerlen >= struct.calcsize(STREAM_HEADER_VIDEO)+1:
# New Directshow Format
htype = header[1:9]
            if htype[:5] == b'video':
sh = header[9:struct.calcsize(STREAM_HEADER_VIDEO)+9]
streamheader = struct.unpack( STREAM_HEADER_VIDEO, sh )
vi = core.VideoStream()
(type, ssize, timeunit, samplerate, vi.length, buffersize, \
vi.bitrate, vi.width, vi.height) = streamheader
vi.width /= 65536
vi.height /= 65536
# XXX length, bitrate are very wrong
vi.codec = type
vi.fps = 10000000 / timeunit
self.video.append(vi)
self.all_streams.append(vi)
            elif htype[:5] == b'audio':
sha = header[9:struct.calcsize(STREAM_HEADER_AUDIO)+9]
streamheader = struct.unpack( STREAM_HEADER_AUDIO, sha )
ai = core.AudioStream()
(type, ssize, timeunit, ai.samplerate, ai.length, buffersize, \
ai.bitrate, ai.channels, bloc, ai.bitrate) = streamheader
self.samplerate = ai.samplerate
log.debug("Samplerate %d" % self.samplerate)
self.audio.append(ai)
self.all_streams.append(ai)
            elif htype[:4] == b'text':
subtitle = core.Subtitle()
# FIXME: add more info
self.subtitles.append(subtitle)
self.all_streams.append(subtitle)
else:
log.debug("Unknown Header")
def _extractHeaderString(self,header):
len = struct.unpack( '<I', header[:4] )[0]
try:
return (len+4,str(header[4:4+len], 'utf-8'))
except (KeyError, IndexError, UnicodeDecodeError):
return (len+4,None)
Parser = Ogm
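def _example_usage(path):
    # Illustrative sketch, not part of the original module: kaa.metadata normally
    # drives this parser through its factory, but direct use is assumed to look
    # roughly like this (the file must be opened in binary mode). Raises
    # core.ParseError if the input is not an OGM (e.g. an audio-only Ogg).
    with open(path, 'rb') as f:
        return Parser(f)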
| mit |
amenonsen/ansible | lib/ansible/modules/network/f5/bigip_profile_tcp.py | 5 | 21521 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_profile_tcp
short_description: Manage TCP profiles on a BIG-IP
description:
  - Manage TCP profiles on a BIG-IP. There are many TCP profiles, each with its
    own adjustments to the standard C(tcp) profile. Users of this module should be aware
that many of the adjustable knobs have no module default. Instead, the default is
assigned by the BIG-IP system itself which, in most cases, is acceptable.
version_added: 2.6
options:
name:
description:
- Specifies the name of the profile.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(tcp) profile.
type: str
idle_timeout:
description:
- Specifies the length of time that a connection is idle (has no traffic) before
the connection is eligible for deletion.
- When creating a new profile, if this parameter is not specified, the remote
device will choose a default value appropriate for the profile, based on its
C(parent) profile.
- When a number is specified, indicates the number of seconds that the TCP
connection can remain idle before the system deletes it.
- When C(0), or C(indefinite), specifies that the system does not delete TCP connections
regardless of how long they remain idle.
type: str
time_wait_recycle:
description:
- Specifies that connections in a TIME-WAIT state are reused, if a SYN packet,
indicating a request for a new connection, is received.
- When C(no), connections in a TIME-WAIT state remain unused for a specified length of time.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
version_added: 2.7
nagle:
description:
- When C(enabled) the system applies Nagle's algorithm to reduce the number of short segments on the network.
- When C(auto), the use of Nagle's algorithm is decided based on network conditions.
- Note that for interactive protocols such as Telnet, rlogin, or SSH, F5 recommends disabling this setting on
high-latency networks, to improve application responsiveness.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: str
choices:
- auto
- enabled
- disabled
version_added: 2.9
early_retransmit:
description:
- When C(yes) the system uses early fast retransmits to reduce the recovery time for connections that are
receive-buffer or user-data limited.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: bool
version_added: 2.9
proxy_options:
description:
- When C(yes) the system advertises an option, such as a time-stamp to the server only if it was negotiated
with the client.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: bool
version_added: 2.9
initial_congestion_window_size:
description:
- Specifies the initial congestion window size for connections to this destination. The actual window size is
this value multiplied by the MSS for the same connection.
- When set to C(0) the system uses the values specified in RFC2414.
- The valid value range is 0 - 16 inclusive.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: int
version_added: 2.9
initial_receive_window_size:
description:
- Specifies the initial receive window size for connections to this destination. The actual window size is
this value multiplied by the MSS for the same connection.
- When set to C(0) the system uses the Slow Start value.
- The valid value range is 0 - 16 inclusive.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: int
version_added: 2.9
syn_rto_base:
description:
- Specifies the initial RTO C(Retransmission TimeOut) base multiplier for SYN retransmission, in C(milliseconds).
- This value is modified by the exponential backoff table to select the interval for subsequent retransmissions.
- The valid value range is 0 - 5000 inclusive.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: int
version_added: 2.9
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a TCP profile
bigip_profile_tcp:
name: foo
parent: f5-tcp-progressive
time_wait_recycle: no
idle_timeout: 300
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
parent:
description: The new parent of the resource.
returned: changed
type: str
sample: f5-tcp-optimized
idle_timeout:
description: The new idle timeout of the resource.
returned: changed
type: int
sample: 100
time_wait_recycle:
description: Reuse connections in TIME-WAIT state.
returned: changed
type: bool
sample: yes
nagle:
description: Specifies the use of Nagle's algorithm.
returned: changed
type: str
sample: auto
early_retransmit:
description: Specifies the use of early fast retransmits.
returned: changed
type: bool
sample: yes
proxy_options:
  description: Specifies whether the system advertises negotiated options to the server.
returned: changed
type: bool
sample: no
initial_congestion_window_size:
description: Specifies the initial congestion window size for connections to this destination.
returned: changed
type: int
sample: 5
initial_receive_window_size:
description: Specifies the initial receive window size for connections to this destination.
returned: changed
type: int
sample: 10
syn_rto_base:
description: Specifies the initial Retransmission TimeOut base multiplier for SYN retransmission.
returned: changed
type: int
sample: 2000
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'idleTimeout': 'idle_timeout',
'defaultsFrom': 'parent',
'timeWaitRecycle': 'time_wait_recycle',
'earlyRetransmit': 'early_retransmit',
'proxyOptions': 'proxy_options',
'initCwnd': 'initial_congestion_window_size',
'initRwnd': 'initial_receive_window_size',
'synRtoBase': 'syn_rto_base'
}
api_attributes = [
'idleTimeout',
'defaultsFrom',
'timeWaitRecycle',
'nagle',
'earlyRetransmit',
'proxyOptions',
'initCwnd',
'initRwnd',
'synRtoBase',
]
returnables = [
'idle_timeout',
'parent',
'time_wait_recycle',
'nagle',
'early_retransmit',
'proxy_options',
'initial_congestion_window_size',
'initial_receive_window_size',
'syn_rto_base',
]
updatables = [
'idle_timeout',
'parent',
'time_wait_recycle',
'nagle',
'early_retransmit',
'proxy_options',
'initial_congestion_window_size',
'initial_receive_window_size',
'syn_rto_base',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def idle_timeout(self):
if self._values['idle_timeout'] is None:
return None
if self._values['idle_timeout'] == 'indefinite':
return 4294967295
return int(self._values['idle_timeout'])
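    # For example (per the option documentation above): idle_timeout='300' maps to
    # 300 seconds, while 'indefinite' maps to 4294967295.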
@property
def time_wait_recycle(self):
result = flatten_boolean(self._values['time_wait_recycle'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def early_retransmit(self):
result = flatten_boolean(self._values['early_retransmit'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def proxy_options(self):
result = flatten_boolean(self._values['proxy_options'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def initial_congestion_window_size(self):
if self._values['initial_congestion_window_size'] is None:
return None
if 0 <= self._values['initial_congestion_window_size'] <= 16:
return self._values['initial_congestion_window_size']
raise F5ModuleError(
"Valid 'initial_congestion_window_size' must be in range 0 - 16 MSS units."
)
@property
def initial_receive_window_size(self):
if self._values['initial_receive_window_size'] is None:
return None
if 0 <= self._values['initial_receive_window_size'] <= 16:
return self._values['initial_receive_window_size']
raise F5ModuleError(
"Valid 'initial_receive_window_size' must be in range 0 - 16 MSS units."
)
@property
def syn_rto_base(self):
if self._values['syn_rto_base'] is None:
return None
if 0 <= self._values['syn_rto_base'] <= 5000:
return self._values['syn_rto_base']
raise F5ModuleError(
"Valid 'syn_rto_base' must be in range 0 - 5000 miliseconds."
)
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def idle_timeout(self):
if self._values['idle_timeout'] is None:
return None
if 0 <= self._values['idle_timeout'] <= 4294967295:
return self._values['idle_timeout']
raise F5ModuleError(
"Valid 'idle_timeout' must be in range 1 - 4294967295, or 'indefinite'."
)
class ReportableChanges(Changes):
@property
def idle_timeout(self):
if self._values['idle_timeout'] is None:
return None
if self._values['idle_timeout'] == 4294967295:
return 'indefinite'
return int(self._values['idle_timeout'])
@property
def time_wait_recycle(self):
if self._values['time_wait_recycle'] is None:
return None
elif self._values['time_wait_recycle'] == 'enabled':
return 'yes'
return 'no'
@property
def early_retransmit(self):
result = flatten_boolean(self._values['early_retransmit'])
return result
@property
def proxy_options(self):
result = flatten_boolean(self._values['proxy_options'])
return result
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
if self.want.parent is None:
self.want.update({'parent': fq_name(self.want.partition, 'tcp')})
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/tcp/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(),
idle_timeout=dict(),
state=dict(
default='present',
choices=['present', 'absent']
),
time_wait_recycle=dict(type='bool'),
nagle=dict(
choices=['enabled', 'disabled', 'auto']
),
early_retransmit=dict(type='bool'),
proxy_options=dict(type='bool'),
initial_congestion_window_size=dict(type='int'),
initial_receive_window_size=dict(type='int'),
syn_rto_base=dict(type='int'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
uw-it-aca/myuw | myuw/test/views/lti/test_photo_list.py | 1 | 2563 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import skipIf
from django.urls import reverse
from django.test.utils import override_settings
from django.test import RequestFactory
from myuw.views.lti.photo_list import LTIPhotoList
from myuw.test.api import missing_url
from myuw.test.views.lti import MyuwLTITest
class TestLTILaunch(MyuwLTITest):
@skipIf(missing_url('myuw_lti_photo_list'), 'myuw urls not configured')
def test_lti_launch(self):
url = reverse('myuw_lti_photo_list')
# Invalid http method
response = self.client.get(
url, HTTP_USER_AGENT="Lynx/2.8.2rel.1 libwww-FM/2.14")
self.assertEquals(response.status_code, 401)
# Invalid launch payload
response = self.client.post(
url, data={},
HTTP_USER_AGENT="Lynx/2.8.2rel.1 libwww-FM/2.14")
self.assertEquals(response.status_code, 401)
@override_settings(BLTI_AES_KEY=b"11111111111111111111111111111111",
BLTI_AES_IV=b"1111111111111111")
class TestLTIPhotoList(MyuwLTITest):
def setUp(self):
self.request = RequestFactory().post(reverse('myuw_lti_photo_list'))
session = self.client.session
session.save()
self.request.session = session
def test_context_data(self):
blti_data = {
'custom_canvas_course_id': 12345,
'lis_course_offering_sourcedid': '2013-spring-ESS-102-A',
'custom_canvas_user_id': 123456,
'context_label': 'ESS 102 A'
}
kwargs = {
'request': self.request,
'blti_params': blti_data,
}
context = LTIPhotoList().get_context_data(**kwargs)
self.assertEquals(context['lti_course_name'], 'ESS 102 A')
self.assertEquals(context['section'], '2013-spring-ESS-102-AA')
self.assertEquals(len(context['sections']), 2)
def test_context_data_no_sections(self):
blti_data = {
'custom_canvas_course_id': 12346,
'lis_course_offering_sourcedid': '2013-spring-ESS-102-B',
'custom_canvas_user_id': 123456,
'context_label': 'ESS 102 B'
}
kwargs = {
'request': self.request,
'blti_params': blti_data,
}
context = LTIPhotoList().get_context_data(**kwargs)
self.assertEquals(context['lti_course_name'], 'ESS 102 B')
self.assertEquals(context['section'], '')
self.assertEquals(len(context['sections']), 0)
| apache-2.0 |
lucciano/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Scanner/__init__.py | 61 | 14716 | """SCons.Scanner
The Scanner package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/__init__.py 5134 2010/08/16 23:02:40 bdeegan"
import re
import SCons.Node.FS
import SCons.Util
class _Null(object):
pass
# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
_null = _Null
def Scanner(function, *args, **kw):
"""
Public interface factory function for creating different types
of Scanners based on the different types of "functions" that may
be supplied.
TODO: Deprecate this some day. We've moved the functionality
inside the Base class and really don't need this factory function
any more. It was, however, used by some of our Tool modules, so
the call probably ended up in various people's custom modules
patterned on SCons code.
"""
if SCons.Util.is_Dict(function):
return Selector(function, *args, **kw)
else:
return Base(function, *args, **kw)
class FindPathDirs(object):
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env[self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
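# Illustrative note, not part of the SCons source: a tool typically binds a scanner's
# search path to a construction variable along these lines, e.g.
# path_function=FindPathDirs('CPPPATH'); calling the resulting object with an
# Environment returns the tuple of directory nodes to search.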
class Base(object):
"""
The base class for dependency scanners. This implements
straightforward, single-pass scanning of a single file.
"""
def __init__(self,
function,
name = "NONE",
argument = _null,
skeys = _null,
path_function = None,
# Node.FS.Base so that, by default, it's okay for a
# scanner to return a Dir, File or Entry.
node_class = SCons.Node.FS.Base,
node_factory = None,
scan_check = None,
recursive = None):
"""
Construct a new scanner object given a scanner function.
'function' - a scanner function taking two or three
arguments and returning a list of strings.
'name' - a name for identifying this scanner object.
'argument' - an optional argument that, if specified, will be
passed to both the scanner function and the path_function.
'skeys' - an optional list argument that can be used to determine
which scanner should be used for a given Node. In the case of File
nodes, for example, the 'skeys' would be file suffixes.
'path_function' - a function that takes four or five arguments
(a construction environment, Node for the directory containing
the SConscript file that defined the primary target, list of
target nodes, list of source nodes, and optional argument for
this instance) and returns a tuple of the directories that can
be searched for implicit dependency files. May also return a
callable() which is called with no args and returns the tuple
(supporting Bindable class).
'node_class' - the class of Nodes which this scan will return.
If node_class is None, then this scanner will not enforce any
Node conversion and will return the raw results from the
underlying scanner function.
'node_factory' - the factory function to be called to translate
the raw results returned by the scanner function into the
expected node_class objects.
'scan_check' - a function to be called to first check whether
this node really needs to be scanned.
'recursive' - specifies that this scanner should be invoked
recursively on all of the implicit dependencies it returns
(the canonical example being #include lines in C source files).
May be a callable, which will be called to filter the list
of nodes found to select a subset for recursive scanning
(the canonical example being only recursively scanning
subdirectories within a directory).
The scanner function's first argument will be a Node that should
be scanned for dependencies, the second argument will be an
Environment object, the third argument will be the tuple of paths
returned by the path_function, and the fourth argument will be
the value passed into 'argument', and the returned list should
contain the Nodes for all the direct dependencies of the file.
Examples:
s = Scanner(my_scanner_function)
s = Scanner(function = my_scanner_function)
s = Scanner(function = my_scanner_function, argument = 'foo')
"""
# Note: this class could easily work with scanner functions that take
# something other than a filename as an argument (e.g. a database
# node) and a dependencies list that aren't file names. All that
# would need to be changed is the documentation.
self.function = function
self.path_function = path_function
self.name = name
self.argument = argument
if skeys is _null:
if SCons.Util.is_Dict(function):
skeys = list(function.keys())
else:
skeys = []
self.skeys = skeys
self.node_class = node_class
self.node_factory = node_factory
self.scan_check = scan_check
if callable(recursive):
self.recurse_nodes = recursive
elif recursive:
self.recurse_nodes = self._recurse_all_nodes
else:
self.recurse_nodes = self._recurse_no_nodes
def path(self, env, dir=None, target=None, source=None):
if not self.path_function:
return ()
if not self.argument is _null:
return self.path_function(env, dir, target, source, self.argument)
else:
return self.path_function(env, dir, target, source)
def __call__(self, node, env, path = ()):
"""
This method scans a single object. 'node' is the node
that will be passed to the scanner function, and 'env' is the
environment that will be passed to the scanner function. A list of
direct dependency nodes for the specified node will be returned.
"""
if self.scan_check and not self.scan_check(node, env):
return []
self = self.select(node)
if not self.argument is _null:
list = self.function(node, env, path, self.argument)
else:
list = self.function(node, env, path)
kw = {}
if hasattr(node, 'dir'):
kw['directory'] = node.dir
node_factory = env.get_factory(self.node_factory)
nodes = []
for l in list:
if self.node_class and not isinstance(l, self.node_class):
l = node_factory(l, **kw)
nodes.append(l)
return nodes
def __cmp__(self, other):
try:
return cmp(self.__dict__, other.__dict__)
except AttributeError:
# other probably doesn't have a __dict__
return cmp(self.__dict__, other)
def __hash__(self):
return id(self)
def __str__(self):
return self.name
def add_skey(self, skey):
"""Add a skey to the list of skeys"""
self.skeys.append(skey)
def get_skeys(self, env=None):
if env and SCons.Util.is_String(self.skeys):
return env.subst_list(self.skeys)[0]
return self.skeys
def select(self, node):
if SCons.Util.is_Dict(self.function):
key = node.scanner_key()
try:
return self.function[key]
except KeyError:
return None
else:
return self
def _recurse_all_nodes(self, nodes):
return nodes
def _recurse_no_nodes(self, nodes):
return []
recurse_nodes = _recurse_no_nodes
def add_scanner(self, skey, scanner):
self.function[skey] = scanner
self.add_skey(skey)
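# --- Illustrative sketch (not part of the original source) ------------------
# A minimal suffix-keyed scanner built on Base; `find_deps`, the "MyScan"
# name and the '.foo'/'.bar' suffixes are assumptions used only to show the
# construction and __call__ interface documented above:
#
#   def find_deps(node, env, path):
#       # return the names (or Nodes) of the direct dependencies of `node`
#       return []
#
#   my_scan = Base(find_deps, name="MyScan", skeys=['.foo'])
#   my_scan.add_skey('.bar')        # register another suffix later
#   deps = my_scan(some_node, env)  # __call__ returns dependency Nodes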
class Selector(Base):
"""
A class for selecting a more specific scanner based on the
scanner_key() (suffix) for a specific Node.
TODO: This functionality has been moved into the inner workings of
the Base class, and this class will be deprecated at some point.
(It was never exposed directly as part of the public interface,
although it is used by the Scanner() factory function that was
used by various Tool modules and therefore was likely a template
for custom modules that may be out there.)
"""
def __init__(self, dict, *args, **kw):
Base.__init__(self, None, *args, **kw)
self.dict = dict
self.skeys = list(dict.keys())
def __call__(self, node, env, path = ()):
return self.select(node)(node, env, path)
def select(self, node):
try:
return self.dict[node.scanner_key()]
except KeyError:
return None
def add_scanner(self, skey, scanner):
self.dict[skey] = scanner
self.add_skey(skey)
class Current(Base):
"""
A class for scanning files that are source files (have no builder)
or are derived files and are current (which implies that they exist,
either locally or in a repository).
"""
def __init__(self, *args, **kw):
def current_check(node, env):
return not node.has_builder() or node.is_up_to_date()
kw['scan_check'] = current_check
Base.__init__(self, *args, **kw)
class Classic(Current):
"""
A Scanner subclass to contain the common logic for classic CPP-style
include scanning, but which can be customized to use different
regular expressions to find the includes.
Note that in order for this to work "out of the box" (without
overriding the find_include() and sort_key() methods), the regular
expression passed to the constructor must return the name of the
include file in group 0.
"""
def __init__(self, name, suffixes, path_variable, regex, *args, **kw):
self.cre = re.compile(regex, re.M)
def _scan(node, env, path=(), self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, path)
kw['function'] = _scan
kw['path_function'] = FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
Current.__init__(self, *args, **kw)
def find_include(self, include, source_dir, path):
n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
return n, include
def sort_key(self, include):
return SCons.Node.FS._my_normcase(include)
def find_include_names(self, node):
return self.cre.findall(node.get_text_contents())
def scan(self, node, path=()):
# cache the includes list in node so we only scan it once:
if node.includes is not None:
includes = node.includes
else:
includes = self.find_include_names(node)
# Intern the names of the include files. Saves some memory
# if the same header is included many times.
node.includes = list(map(SCons.Util.silent_intern, includes))
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the #include line (including the
# " or <, since that may affect what file is found), which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for include in includes:
n, i = self.find_include(include, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
nodes.append((self.sort_key(include), n))
return [pair[1] for pair in sorted(nodes)]
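# --- Illustrative sketch (not part of the original source) ------------------
# Defining a Classic scanner for a hypothetical language whose sources use
# `include "file"` directives; the scanner name, suffix, path variable and
# regular expression below are all assumptions:
#
#   foo_scan = Classic("FooScanner",
#                      ['.foo'],              # suffixes that trigger this scanner
#                      'FOOPATH',             # construction variable holding search dirs
#                      r'include\s+"(.+?)"')  # single group captures the file name
#
# The regex is compiled with re.M in __init__, and find_include_names() feeds
# each match through find_include()/sort_key() as shown in scan() above.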
class ClassicCPP(Classic):
"""
A Classic Scanner subclass which takes into account the type of
bracketing used to include the file, and uses classic CPP rules
for searching for the files based on the bracketing.
Note that in order for this to work, the regular expression passed
to the constructor must return the leading bracket in group 0, and
the contained filename in group 1.
"""
def find_include(self, include, source_dir, path):
if include[0] == '"':
paths = (source_dir,) + tuple(path)
else:
paths = tuple(path) + (source_dir,)
n = SCons.Node.FS.find_file(include[1], paths)
i = SCons.Util.silent_intern(include[1])
return n, i
def sort_key(self, include):
return SCons.Node.FS._my_normcase(' '.join(include))
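# --- Illustrative sketch (not part of the original source) ------------------
# A regular expression of the shape ClassicCPP expects: the leading bracket is
# captured in group 0 (so quoted includes search the source directory first)
# and the contained file name in group 1. The exact pattern is an assumption:
#
#   cpp_like_re = r'^\s*#\s*include\s*([<"])([^>"]+)[>"]'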
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
KMK-ONLINE/ansible | lib/ansible/playbook/handler_task_include.py | 97 | 1346 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
#from ansible.inventory.host import Host
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler import Handler
class HandlerTaskInclude(Handler, TaskInclude):
@staticmethod
def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
t = HandlerTaskInclude(block=block, role=role, task_include=task_include)
return t.load_data(data, variable_manager=variable_manager, loader=loader)
| gpl-3.0 |
rsheftel/pandas_market_calendars | pandas_market_calendars/exchange_calendars_mirror.py | 1 | 1912 | """
Imported calendars from the exchange_calendars project
GitHub: https://github.com/gerrymanoim/exchange_calendars
"""
from datetime import time
from .market_calendar import MarketCalendar
import exchange_calendars
class TradingCalendar(MarketCalendar):
def __init__(self, open_time=None, close_time=None):
self._tc = self._tc_class() # noqa: _tc_class is defined in the class generator below
super().__init__(open_time, close_time)
@property
def name(self):
return self._tc.name
@property
def tz(self):
return self._tc.tz
@property
def open_time_default(self):
return self._tc.open_times[0][1].replace(tzinfo=self.tz)
@property
def close_time_default(self):
return self._tc.close_times[0][1].replace(tzinfo=self.tz)
@property
def break_start(self):
tc_time = self._tc.break_start_times
return tc_time[0][1] if tc_time else None
@property
def break_end(self):
tc_time = self._tc.break_end_times
return tc_time[0][1] if tc_time else None
@property
def regular_holidays(self):
return self._tc.regular_holidays
@property
def adhoc_holidays(self):
return self._tc.adhoc_holidays
@property
def special_opens(self):
return self._tc.special_opens
@property
def special_opens_adhoc(self):
return self._tc.special_opens_adhoc
@property
def special_closes(self):
return self._tc.special_closes
@property
def special_closes_adhoc(self):
return self._tc.special_closes_adhoc
calendars = exchange_calendars.calendar_utils._default_calendar_factories # noqa
for exchange in calendars:
locals()[exchange + 'ExchangeCalendar'] = type(exchange, (TradingCalendar, ),
{'_tc_class': calendars[exchange], 'alias': [exchange]})
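# Illustrative note (not part of the original source): the loop above mints a
# `<NAME>ExchangeCalendar` class for every factory that exchange_calendars
# exposes (for example an `XNYSExchangeCalendar` if 'XNYS' is among the
# factories). In normal use these mirrored classes are reached through the
# package's get_calendar() helper; 'NYSE' below is an assumed alias:
#
#   import pandas_market_calendars as mcal
#   nyse = mcal.get_calendar('NYSE')
#   schedule = nyse.schedule(start_date='2021-01-04', end_date='2021-01-08')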
| mit |
leppa/home-assistant | homeassistant/components/ebusd/__init__.py | 3 | 3931 | """Support for Ebusd daemon for communication with eBUS heating systems."""
from datetime import timedelta
import logging
import socket
import ebusdpy
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PORT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import Throttle
from .const import DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ebusd"
DEFAULT_PORT = 8888
CONF_CIRCUIT = "circuit"
CACHE_TTL = 900
SERVICE_EBUSD_WRITE = "ebusd_write"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=15)
def verify_ebusd_config(config):
"""Verify eBusd config."""
circuit = config[CONF_CIRCUIT]
for condition in config[CONF_MONITORED_CONDITIONS]:
if condition not in SENSOR_TYPES[circuit]:
raise vol.Invalid("Condition '" + condition + "' not in '" + circuit + "'.")
return config
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
vol.All(
{
vol.Required(CONF_CIRCUIT): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]): cv.ensure_list,
},
verify_ebusd_config,
)
)
},
extra=vol.ALLOW_EXTRA,
)
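# Illustrative configuration sketch (not part of the original source). The
# circuit and condition names below are assumptions; valid values come from
# SENSOR_TYPES in const.py:
#
#   # configuration.yaml
#   ebusd:
#     host: 127.0.0.1
#     port: 8888
#     circuit: "700"
#     monitored_conditions:
#       - ActualFlowTemperatureDesired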
def setup(hass, config):
"""Set up the eBusd component."""
conf = config[DOMAIN]
name = conf[CONF_NAME]
circuit = conf[CONF_CIRCUIT]
monitored_conditions = conf.get(CONF_MONITORED_CONDITIONS)
server_address = (conf.get(CONF_HOST), conf.get(CONF_PORT))
try:
_LOGGER.debug("Ebusd integration setup started")
ebusdpy.init(server_address)
hass.data[DOMAIN] = EbusdData(server_address, circuit)
sensor_config = {
CONF_MONITORED_CONDITIONS: monitored_conditions,
"client_name": name,
"sensor_types": SENSOR_TYPES[circuit],
}
load_platform(hass, "sensor", DOMAIN, sensor_config, config)
hass.services.register(DOMAIN, SERVICE_EBUSD_WRITE, hass.data[DOMAIN].write)
_LOGGER.debug("Ebusd integration setup completed")
return True
except (socket.timeout, socket.error):
return False
class EbusdData:
"""Get the latest data from Ebusd."""
def __init__(self, address, circuit):
"""Initialize the data object."""
self._circuit = circuit
self._address = address
self.value = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, name, stype):
"""Call the Ebusd API to update the data."""
try:
_LOGGER.debug("Opening socket to ebusd %s", name)
command_result = ebusdpy.read(
self._address, self._circuit, name, stype, CACHE_TTL
)
if command_result is not None:
if "ERR:" in command_result:
_LOGGER.warning(command_result)
else:
self.value[name] = command_result
except RuntimeError as err:
_LOGGER.error(err)
raise RuntimeError(err)
def write(self, call):
"""Call write methon on ebusd."""
name = call.data.get("name")
value = call.data.get("value")
try:
_LOGGER.debug("Opening socket to ebusd %s", name)
command_result = ebusdpy.write(self._address, self._circuit, name, value)
if command_result is not None:
if "done" not in command_result:
_LOGGER.warning("Write command failed: %s", name)
except RuntimeError as err:
_LOGGER.error(err)
| apache-2.0 |
yakovenkodenis/rethinkdb | drivers/python/rethinkdb/net_asyncio.py | 3 | 10221 | # Copyright 2015 RethinkDB, all rights reserved.
import asyncio
import contextlib
import socket
import struct
from . import ql2_pb2 as p
from .ast import ReQLDecoder
from .net import decodeUTF, Query, Response, Cursor, maybe_profile
from .net import Connection as ConnectionBase
from .errors import *
__all__ = ['Connection']
pResponse = p.Response.ResponseType
pQuery = p.Query.QueryType
@asyncio.coroutine
def _read_until(streamreader, delimiter):
"""Naive implementation of reading until a delimiter"""
buffer = bytearray()
while True:
c = yield from streamreader.read(1)
if c == b'':
break # EOF
buffer.append(c[0])
if c == delimiter:
break
return bytes(buffer)
def reusable_waiter(loop, timeout):
"""Wait for something, with a timeout from when the waiter was created.
This can be used in loops::
waiter = reusable_waiter(event_loop, 10.0)
while some_condition:
yield from waiter(some_future)
"""
if timeout is not None:
deadline = loop.time() + timeout
else:
deadline = None
@asyncio.coroutine
def wait(future):
if deadline is not None:
new_timeout = max(deadline - loop.time(), 0)
else:
new_timeout = None
return (yield from asyncio.wait_for(future, new_timeout, loop=loop))
return wait
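# Illustrative sketch (not part of the original source): the deadline is fixed
# when the waiter is created, so successive awaits share one overall budget.
#
#   waiter = reusable_waiter(loop, 10.0)
#   first = yield from waiter(some_future)     # may consume part of the 10s
#   second = yield from waiter(other_future)   # only the remaining time is allowed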
@contextlib.contextmanager
def translate_timeout_errors():
try:
yield
except asyncio.TimeoutError:
raise ReqlTimeoutError()
# The asyncio implementation of the Cursor object:
# The `new_response` Future notifies any waiting coroutines that they can attempt
# to grab the next result. In addition, the waiting coroutine will schedule a
# timeout at the given deadline (if provided), at which point the future will be
# errored.
class AsyncioCursor(Cursor):
def __init__(self, *args, **kwargs):
Cursor.__init__(self, *args, **kwargs)
self.new_response = asyncio.Future()
def _extend(self, res):
Cursor._extend(self, res)
self.new_response.set_result(True)
self.new_response = asyncio.Future()
# Convenience function so users know when they've hit the end of the cursor
# without having to catch an exception
@asyncio.coroutine
def fetch_next(self, wait=True):
timeout = Cursor._wait_to_timeout(wait)
waiter = reusable_waiter(self.conn._io_loop, timeout)
while len(self.items) == 0 and self.error is None:
self._maybe_fetch_batch()
with translate_timeout_errors():
yield from waiter(asyncio.shield(self.new_response))
# If there is a (non-empty) error to be received, we return True, so the
# user will receive it on the next `next` call.
return len(self.items) != 0 or not isinstance(self.error, RqlCursorEmpty)
def _empty_error(self):
# We do not have RqlCursorEmpty inherit from StopIteration as that interferes
# with mechanisms to return from a coroutine.
return RqlCursorEmpty()
@asyncio.coroutine
def _get_next(self, timeout):
waiter = reusable_waiter(self.conn._io_loop, timeout)
while len(self.items) == 0:
self._maybe_fetch_batch()
if self.error is not None:
raise self.error
with translate_timeout_errors():
yield from waiter(asyncio.shield(self.new_response))
return self.items.popleft()
def _maybe_fetch_batch(self):
if self.error is None and \
len(self.items) < self.threshold and \
self.outstanding_requests == 0:
self.outstanding_requests += 1
asyncio.async(self.conn._parent._continue(self))
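# Illustrative sketch (not part of the original source): consuming an
# AsyncioCursor from a coroutine, assuming the usual driver entry points
# (r.connect / r.table(...).run) hand back this connection type:
#
#   conn = yield from r.connect(host='localhost', port=28015)
#   cursor = yield from r.table('tv_shows').run(conn)
#   while (yield from cursor.fetch_next()):
#       item = yield from cursor.next()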
class ConnectionInstance(object):
_streamreader = None
_streamwriter = None
def __init__(self, parent, io_loop=None):
self._parent = parent
self._closing = False
self._user_queries = { }
self._cursor_cache = { }
self._ready = asyncio.Future()
self._io_loop = io_loop
if self._io_loop is None:
self._io_loop = asyncio.get_event_loop()
@asyncio.coroutine
def connect(self, timeout):
try:
self._streamreader, self._streamwriter = yield from \
asyncio.open_connection(self._parent.host, self._parent.port,
family=socket.AF_INET, loop=self._io_loop)
self._streamwriter.get_extra_info('socket').setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception as err:
raise ReqlDriverError('Could not connect to %s:%s. Error: %s' %
(self._parent.host, self._parent.port, str(err)))
try:
self._streamwriter.write(self._parent.handshake)
with translate_timeout_errors():
response = yield from asyncio.wait_for(
_read_until(self._streamreader, b'\0'),
timeout, loop=self._io_loop,
)
except Exception as err:
raise ReqlDriverError(
'Connection interrupted during handshake with %s:%s. Error: %s' %
(self._parent.host, self._parent.port, str(err)))
message = decodeUTF(response[:-1]).split('\n')[0]
if message != 'SUCCESS':
self.close(False, None)
if message == "ERROR: Incorrect authorization key":
raise ReqlAuthError(self._parent.host, self._parent.port)
else:
raise ReqlDriverError('Server dropped connection with message: "%s"' %
(message, ))
# Start a parallel function to perform reads
# store a reference to it so it doesn't get destroyed
self._reader_task = asyncio.async(self._reader(), loop=self._io_loop)
return self._parent
def is_open(self):
return not (self._closing or self._streamreader.at_eof())
@asyncio.coroutine
def close(self, noreply_wait, token, exception=None):
self._closing = True
if exception is not None:
err_message = "Connection is closed (%s)." % str(exception)
else:
err_message = "Connection is closed."
# Cursors may remove themselves when errored, so copy a list of them
for cursor in list(self._cursor_cache.values()):
cursor._error(err_message)
for query, future in iter(self._user_queries.values()):
future.set_exception(ReqlDriverError(err_message))
self._user_queries = { }
self._cursor_cache = { }
if noreply_wait:
noreply = Query(pQuery.NOREPLY_WAIT, token, None, None)
yield from self.run_query(noreply, False)
self._streamwriter.close()
return None
@asyncio.coroutine
def run_query(self, query, noreply):
self._streamwriter.write(query.serialize())
if noreply:
return None
response_future = asyncio.Future()
self._user_queries[query.token] = (query, response_future)
return (yield from response_future)
# The _reader coroutine runs in parallel, reading responses
# off of the socket and forwarding them to the appropriate Future or Cursor.
# This is shut down as a consequence of closing the stream, or an error in the
# socket/protocol from the server. Unexpected errors in this coroutine will
# close the ConnectionInstance and be passed to any open Futures or Cursors.
@asyncio.coroutine
def _reader(self):
try:
while True:
buf = yield from self._streamreader.readexactly(12)
(token, length,) = struct.unpack("<qL", buf)
buf = yield from self._streamreader.readexactly(length)
cursor = self._cursor_cache.get(token)
if cursor is not None:
cursor._extend(buf)
elif token in self._user_queries:
# Do not pop the query from the dict until later, so
# we don't lose track of it in case of an exception
query, future = self._user_queries[token]
res = Response(token, buf,
self._parent._get_json_decoder(query.global_optargs))
if res.type == pResponse.SUCCESS_ATOM:
future.set_result(maybe_profile(res.data[0], res))
elif res.type in (pResponse.SUCCESS_SEQUENCE,
pResponse.SUCCESS_PARTIAL):
cursor = AsyncioCursor(self, query, res)
future.set_result(maybe_profile(cursor, res))
elif res.type == pResponse.WAIT_COMPLETE:
future.set_result(None)
else:
future.set_exception(res.make_error(query))
del self._user_queries[token]
elif not self._closing:
raise ReqlDriverError("Unexpected response received.")
except Exception as ex:
if not self._closing:
yield from self.close(False, None, ex)
class Connection(ConnectionBase):
def __init__(self, *args, **kwargs):
ConnectionBase.__init__(self, ConnectionInstance, *args, **kwargs)
try:
self.port = int(self.port)
except ValueError:
raise ReqlDriverError("Could not convert port %s to an integer." % self.port)
@asyncio.coroutine
def reconnect(self, noreply_wait=True, timeout=None):
# We close before reconnect so reconnect doesn't try to close us
# and then fail to return the Future (this is a little awkward).
yield from self.close(noreply_wait)
self._instance = self._conn_type(self, **self._child_kwargs)
return (yield from self._instance.connect(timeout))
@asyncio.coroutine
def close(self, *args, **kwargs):
if self._instance is None:
return None
return (yield from ConnectionBase.close(self, *args, **kwargs))
| agpl-3.0 |
vitan/django | tests/migrations/test_state.py | 3 | 34875 | from django.apps.registry import Apps
from django.db import models
from django.db.migrations.operations import DeleteModel, RemoveField
from django.db.migrations.state import (
InvalidBasesError, ModelState, ProjectState, get_related_models_recursive,
)
from django.test import SimpleTestCase, TestCase, override_settings
from .models import (
FoodManager, FoodQuerySet, ModelWithCustomBase, NoMigrationFoodManager,
)
class StateTests(TestCase):
"""
Tests state construction, rendering and modification by operations.
"""
def test_create(self):
"""
Tests making a ProjectState from an Apps
"""
new_apps = Apps(["migrations"])
class Author(models.Model):
name = models.CharField(max_length=255)
bio = models.TextField()
age = models.IntegerField(blank=True, null=True)
class Meta:
app_label = "migrations"
apps = new_apps
unique_together = ["name", "bio"]
index_together = ["bio", "age"]
class AuthorProxy(Author):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
ordering = ["name"]
class SubAuthor(Author):
width = models.FloatField(null=True)
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
title = models.CharField(max_length=1000)
author = models.ForeignKey(Author)
contributors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
verbose_name = "tome"
db_table = "test_tome"
class Food(models.Model):
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
food_no_mgr = NoMigrationFoodManager('x', 'y')
class Meta:
app_label = "migrations"
apps = new_apps
class FoodNoManagers(models.Model):
class Meta:
app_label = "migrations"
apps = new_apps
class FoodNoDefaultManager(models.Model):
food_no_mgr = NoMigrationFoodManager('x', 'y')
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
class Meta:
app_label = "migrations"
apps = new_apps
mgr1 = FoodManager('a', 'b')
mgr2 = FoodManager('x', 'y', c=3, d=4)
class FoodOrderedManagers(models.Model):
# The managers on this model should be ordered by their creation
# counter and not by the order in model body
food_no_mgr = NoMigrationFoodManager('x', 'y')
food_mgr2 = mgr2
food_mgr1 = mgr1
class Meta:
app_label = "migrations"
apps = new_apps
project_state = ProjectState.from_apps(new_apps)
author_state = project_state.models['migrations', 'author']
author_proxy_state = project_state.models['migrations', 'authorproxy']
sub_author_state = project_state.models['migrations', 'subauthor']
book_state = project_state.models['migrations', 'book']
food_state = project_state.models['migrations', 'food']
food_no_managers_state = project_state.models['migrations', 'foodnomanagers']
food_no_default_manager_state = project_state.models['migrations', 'foodnodefaultmanager']
food_order_manager_state = project_state.models['migrations', 'foodorderedmanagers']
self.assertEqual(author_state.app_label, "migrations")
self.assertEqual(author_state.name, "Author")
self.assertEqual([x for x, y in author_state.fields], ["id", "name", "bio", "age"])
self.assertEqual(author_state.fields[1][1].max_length, 255)
self.assertEqual(author_state.fields[2][1].null, False)
self.assertEqual(author_state.fields[3][1].null, True)
self.assertEqual(author_state.options, {"unique_together": {("name", "bio")}, "index_together": {("bio", "age")}})
self.assertEqual(author_state.bases, (models.Model, ))
self.assertEqual(book_state.app_label, "migrations")
self.assertEqual(book_state.name, "Book")
self.assertEqual([x for x, y in book_state.fields], ["id", "title", "author", "contributors"])
self.assertEqual(book_state.fields[1][1].max_length, 1000)
self.assertEqual(book_state.fields[2][1].null, False)
self.assertEqual(book_state.fields[3][1].__class__.__name__, "ManyToManyField")
self.assertEqual(book_state.options, {"verbose_name": "tome", "db_table": "test_tome"})
self.assertEqual(book_state.bases, (models.Model, ))
self.assertEqual(author_proxy_state.app_label, "migrations")
self.assertEqual(author_proxy_state.name, "AuthorProxy")
self.assertEqual(author_proxy_state.fields, [])
self.assertEqual(author_proxy_state.options, {"proxy": True, "ordering": ["name"]})
self.assertEqual(author_proxy_state.bases, ("migrations.author", ))
self.assertEqual(sub_author_state.app_label, "migrations")
self.assertEqual(sub_author_state.name, "SubAuthor")
self.assertEqual(len(sub_author_state.fields), 2)
self.assertEqual(sub_author_state.bases, ("migrations.author", ))
# The default manager is used in migrations
self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))
# No explicit managers defined. Migrations will fall back to the default
self.assertEqual(food_no_managers_state.managers, [])
# food_mgr is used in migration but isn't the default mgr, hence add the
# default
self.assertEqual([name for name, mgr in food_no_default_manager_state.managers],
['food_no_mgr', 'food_mgr'])
self.assertEqual(food_no_default_manager_state.managers[0][1].__class__, models.Manager)
self.assertIsInstance(food_no_default_manager_state.managers[1][1], FoodManager)
self.assertEqual([name for name, mgr in food_order_manager_state.managers],
['food_mgr1', 'food_mgr2'])
self.assertEqual([mgr.args for name, mgr in food_order_manager_state.managers],
[('a', 'b', 1, 2), ('x', 'y', 3, 4)])
def test_render(self):
"""
Tests rendering a ProjectState into an Apps.
"""
project_state = ProjectState()
project_state.add_model(ModelState(
app_label="migrations",
name="Tag",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
))
project_state.add_model(ModelState(
app_label="migrations",
name="SubTag",
fields=[
('tag_ptr', models.OneToOneField(
auto_created=True,
primary_key=True,
to_field='id',
serialize=False,
to='migrations.Tag',
)),
("awesome", models.BooleanField()),
],
bases=("migrations.Tag",),
))
base_mgr = models.Manager()
mgr1 = FoodManager('a', 'b')
mgr2 = FoodManager('x', 'y', c=3, d=4)
project_state.add_model(ModelState(
app_label="migrations",
name="Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
# The ordering we really want is objects, mgr1, mgr2
('default', base_mgr),
('food_mgr2', mgr2),
('food_mgr1', mgr1),
]
))
new_apps = project_state.apps
self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("name").max_length, 100)
self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("hidden").null, False)
self.assertEqual(len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2)
Food = new_apps.get_model("migrations", "Food")
managers = sorted(Food._meta.managers)
self.assertEqual([mgr.name for _, mgr, _ in managers],
['default', 'food_mgr1', 'food_mgr2'])
self.assertEqual([mgr.__class__ for _, mgr, _ in managers],
[models.Manager, FoodManager, FoodManager])
self.assertIs(managers[0][1], Food._default_manager)
def test_render_model_inheritance(self):
class Book(models.Model):
title = models.CharField(max_length=1000)
class Meta:
app_label = "migrations"
apps = Apps()
class Novel(Book):
class Meta:
app_label = "migrations"
apps = Apps()
# First, test rendering individually
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(Novel)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent model is in the app registry, it should be fine
ModelState.from_model(Book).render(apps)
ModelState.from_model(Novel).render(apps)
def test_render_model_with_multiple_inheritance(self):
class Foo(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class Bar(models.Model):
class Meta:
app_label = "migrations"
apps = Apps()
class FooBar(Foo, Bar):
class Meta:
app_label = "migrations"
apps = Apps()
class AbstractSubFooBar(FooBar):
class Meta:
abstract = True
apps = Apps()
class SubFooBar(AbstractSubFooBar):
class Meta:
app_label = "migrations"
apps = Apps()
apps = Apps(["migrations"])
# We shouldn't be able to render yet
ms = ModelState.from_model(FooBar)
with self.assertRaises(InvalidBasesError):
ms.render(apps)
# Once the parent models are in the app registry, it should be fine
ModelState.from_model(Foo).render(apps)
self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model])
ModelState.from_model(Bar).render(apps)
self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model])
ModelState.from_model(FooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(FooBar).bases, ['migrations.foo', 'migrations.bar'])
ModelState.from_model(SubFooBar).render(apps)
self.assertSequenceEqual(ModelState.from_model(SubFooBar).bases, ['migrations.foobar'])
def test_render_project_dependencies(self):
"""
Tests that the ProjectState render method correctly renders models
to account for inter-model base dependencies.
"""
new_apps = Apps()
class A(models.Model):
class Meta:
app_label = "migrations"
apps = new_apps
class B(A):
class Meta:
app_label = "migrations"
apps = new_apps
class C(B):
class Meta:
app_label = "migrations"
apps = new_apps
class D(A):
class Meta:
app_label = "migrations"
apps = new_apps
class E(B):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
class F(D):
class Meta:
app_label = "migrations"
apps = new_apps
proxy = True
# Make a ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.add_model(ModelState.from_model(D))
project_state.add_model(ModelState.from_model(E))
project_state.add_model(ModelState.from_model(F))
final_apps = project_state.apps
self.assertEqual(len(final_apps.get_models()), 6)
# Now make an invalid ProjectState and make sure it fails
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
project_state.add_model(ModelState.from_model(C))
project_state.add_model(ModelState.from_model(F))
with self.assertRaises(InvalidBasesError):
project_state.apps
def test_render_unique_app_labels(self):
"""
Tests that the ProjectState render method doesn't raise an
ImproperlyConfigured exception about unique labels if two dotted app
names have the same last part.
"""
class A(models.Model):
class Meta:
app_label = "django.contrib.auth"
class B(models.Model):
class Meta:
app_label = "vendor.auth"
# Make a ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
self.assertEqual(len(project_state.apps.get_models()), 2)
def test_remove_relations(self):
"""
#24225 - Tests that relations between models are updated while
retaining the relations and references for models of an old state.
"""
class A(models.Model):
class Meta:
app_label = "something"
class B(models.Model):
to_a = models.ForeignKey(A)
class Meta:
app_label = "something"
def get_model_a(state):
return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0]
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1)
old_state = project_state.clone()
operation = RemoveField("b", "to_a")
operation.state_forwards("something", project_state)
# Tests that model from old_state still has the relation
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
self.assertEqual(len(model_a_old._meta.related_objects), 1)
self.assertEqual(len(model_a_new._meta.related_objects), 0)
# Same test for deleted model
project_state = ProjectState()
project_state.add_model(ModelState.from_model(A))
project_state.add_model(ModelState.from_model(B))
old_state = project_state.clone()
operation = DeleteModel("b")
operation.state_forwards("something", project_state)
model_a_old = get_model_a(old_state)
model_a_new = get_model_a(project_state)
self.assertIsNot(model_a_old, model_a_new)
self.assertEqual(len(model_a_old._meta.related_objects), 1)
self.assertEqual(len(model_a_new._meta.related_objects), 0)
def test_equality(self):
"""
Tests that == and != are implemented correctly.
"""
# Test two things that should be equal
project_state = ProjectState()
project_state.add_model(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("hidden", models.BooleanField()),
],
{},
None,
))
project_state.apps # Fill the apps cached property
other_state = project_state.clone()
self.assertEqual(project_state, project_state)
self.assertEqual(project_state, other_state)
self.assertEqual(project_state != project_state, False)
self.assertEqual(project_state != other_state, False)
self.assertNotEqual(project_state.apps, other_state.apps)
# Make a very small change (max_len 99) and see if that affects it
project_state = ProjectState()
project_state.add_model(ModelState(
"migrations",
"Tag",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=99)),
("hidden", models.BooleanField()),
],
{},
None,
))
self.assertNotEqual(project_state, other_state)
self.assertEqual(project_state == other_state, False)
def test_dangling_references_throw_error(self):
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author)
class Meta:
app_label = "migrations"
apps = new_apps
class Magazine(models.Model):
authors = models.ManyToManyField(Author)
class Meta:
app_label = "migrations"
apps = new_apps
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Author))
project_state.add_model(ModelState.from_model(Book))
project_state.add_model(ModelState.from_model(Magazine))
self.assertEqual(len(project_state.apps.get_models()), 3)
# now make an invalid one with a ForeignKey
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Book))
with self.assertRaises(ValueError):
project_state.apps
# and another with ManyToManyField
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Magazine))
with self.assertRaises(ValueError):
project_state.apps
def test_real_apps(self):
"""
Tests that including real apps can resolve dangling FK errors.
This test relies on the fact that contenttypes is always loaded.
"""
new_apps = Apps()
class TestModel(models.Model):
ct = models.ForeignKey("contenttypes.ContentType")
class Meta:
app_label = "migrations"
apps = new_apps
# If we just stick it into an empty state it should fail
project_state = ProjectState()
project_state.add_model(ModelState.from_model(TestModel))
with self.assertRaises(ValueError):
project_state.apps
# If we include the real app it should succeed
project_state = ProjectState(real_apps=["contenttypes"])
project_state.add_model(ModelState.from_model(TestModel))
rendered_state = project_state.apps
self.assertEqual(
len([x for x in rendered_state.get_models() if x._meta.app_label == "migrations"]),
1,
)
def test_ignore_order_wrt(self):
"""
Makes sure ProjectState doesn't include OrderWrt fields when
making from existing models.
"""
new_apps = Apps()
class Author(models.Model):
name = models.TextField()
class Meta:
app_label = "migrations"
apps = new_apps
class Book(models.Model):
author = models.ForeignKey(Author)
class Meta:
app_label = "migrations"
apps = new_apps
order_with_respect_to = "author"
# Make a valid ProjectState and render it
project_state = ProjectState()
project_state.add_model(ModelState.from_model(Author))
project_state.add_model(ModelState.from_model(Book))
self.assertEqual(
[name for name, field in project_state.models["migrations", "book"].fields],
["id", "author"],
)
def test_manager_refer_correct_model_version(self):
"""
#24147 - Tests that managers refer to the correct version of a
historical model
"""
project_state = ProjectState()
project_state.add_model(ModelState(
app_label="migrations",
name="Tag",
fields=[
("id", models.AutoField(primary_key=True)),
("hidden", models.BooleanField()),
],
managers=[
('food_mgr', FoodManager('a', 'b')),
('food_qs', FoodQuerySet.as_manager()),
]
))
old_model = project_state.apps.get_model('migrations', 'tag')
new_state = project_state.clone()
operation = RemoveField("tag", "hidden")
operation.state_forwards("migrations", new_state)
new_model = new_state.apps.get_model('migrations', 'tag')
self.assertIsNot(old_model, new_model)
self.assertIs(old_model, old_model.food_mgr.model)
self.assertIs(old_model, old_model.food_qs.model)
self.assertIs(new_model, new_model.food_mgr.model)
self.assertIs(new_model, new_model.food_qs.model)
self.assertIsNot(old_model.food_mgr, new_model.food_mgr)
self.assertIsNot(old_model.food_qs, new_model.food_qs)
self.assertIsNot(old_model.food_mgr.model, new_model.food_mgr.model)
self.assertIsNot(old_model.food_qs.model, new_model.food_qs.model)
def test_choices_iterator(self):
"""
#24483 - ProjectState.from_apps should not destructively consume
Field.choices iterators.
"""
new_apps = Apps(["migrations"])
choices = [('a', 'A'), ('b', 'B')]
class Author(models.Model):
name = models.CharField(max_length=255)
choice = models.CharField(max_length=255, choices=iter(choices))
class Meta:
app_label = "migrations"
apps = new_apps
ProjectState.from_apps(new_apps)
choices_field = Author._meta.get_field('choice')
self.assertEqual(list(choices_field.choices), choices)
class ModelStateTests(TestCase):
def test_custom_model_base(self):
state = ModelState.from_model(ModelWithCustomBase)
self.assertEqual(state.bases, (models.Model,))
def test_bound_field_sanity_check(self):
field = models.CharField(max_length=1)
field.model = models.Model
with self.assertRaisesMessage(ValueError,
'ModelState.fields cannot be bound to a model - "field" is.'):
ModelState('app', 'Model', [('field', field)])
def test_fields_immutability(self):
"""
Tests that rendering a model state doesn't alter its internal fields.
"""
apps = Apps()
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)])
Model = state.render(apps)
self.assertNotEqual(Model._meta.get_field('name'), field)
def test_repr(self):
field = models.CharField(max_length=1)
state = ModelState('app', 'Model', [('name', field)], bases=['app.A', 'app.B', 'app.C'])
self.assertEqual(repr(state), "<ModelState: 'app.Model'>")
project_state = ProjectState()
project_state.add_model(state)
with self.assertRaisesMessage(InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]"):
project_state.apps
@override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
def test_create_swappable(self):
"""
Tests making a ProjectState from an Apps with a swappable model
"""
new_apps = Apps(['migrations'])
class Author(models.Model):
name = models.CharField(max_length=255)
bio = models.TextField()
age = models.IntegerField(blank=True, null=True)
class Meta:
app_label = 'migrations'
apps = new_apps
swappable = 'TEST_SWAPPABLE_MODEL'
author_state = ModelState.from_model(Author)
self.assertEqual(author_state.app_label, 'migrations')
self.assertEqual(author_state.name, 'Author')
self.assertEqual([x for x, y in author_state.fields], ['id', 'name', 'bio', 'age'])
self.assertEqual(author_state.fields[1][1].max_length, 255)
self.assertEqual(author_state.fields[2][1].null, False)
self.assertEqual(author_state.fields[3][1].null, True)
self.assertEqual(author_state.options, {'swappable': 'TEST_SWAPPABLE_MODEL'})
self.assertEqual(author_state.bases, (models.Model, ))
self.assertEqual(author_state.managers, [])
@override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
def test_custom_manager_swappable(self):
"""
Tests making a ProjectState from unused models with custom managers
"""
new_apps = Apps(['migrations'])
class Food(models.Model):
food_mgr = FoodManager('a', 'b')
food_qs = FoodQuerySet.as_manager()
food_no_mgr = NoMigrationFoodManager('x', 'y')
class Meta:
app_label = "migrations"
apps = new_apps
swappable = 'TEST_SWAPPABLE_MODEL'
food_state = ModelState.from_model(Food)
# The default manager is used in migrations
self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr'])
self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2))
class RelatedModelsTests(SimpleTestCase):
def setUp(self):
self.apps = Apps(['migrations.related_models_app'])
def create_model(self, name, foreign_keys=[], bases=(), abstract=False, proxy=False):
test_name = 'related_models_app'
assert not (abstract and proxy)
meta_contents = {
'abstract': abstract,
'app_label': test_name,
'apps': self.apps,
'proxy': proxy,
}
meta = type(str("Meta"), tuple(), meta_contents)
if not bases:
bases = (models.Model,)
body = {
'Meta': meta,
'__module__': "__fake__",
}
fname_base = fname = '%s_%%d' % name.lower()
for i, fk in enumerate(foreign_keys, 1):
fname = fname_base % i
body[fname] = fk
return type(name, bases, body)
def assertRelated(self, model, needle):
self.assertEqual(
get_related_models_recursive(model),
{(n._meta.app_label, n._meta.model_name) for n in needle},
)
def test_unrelated(self):
A = self.create_model("A")
B = self.create_model("B")
self.assertRelated(A, [])
self.assertRelated(B, [])
def test_direct_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B')])
B = self.create_model("B")
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_direct_hidden_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B', related_name='+')])
B = self.create_model("B")
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_nested_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B')])
B = self.create_model("B", foreign_keys=[models.ForeignKey('C')])
C = self.create_model("C")
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_two_sided(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B')])
B = self.create_model("B", foreign_keys=[models.ForeignKey('A')])
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_circle(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('B')])
B = self.create_model("B", foreign_keys=[models.ForeignKey('C')])
C = self.create_model("C", foreign_keys=[models.ForeignKey('A')])
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,))
self.assertRelated(A, [B])
self.assertRelated(B, [A])
def test_nested_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,))
C = self.create_model("C", bases=(B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_multiple_bases(self):
A = self.create_model("A")
B = self.create_model("B")
C = self.create_model("C", bases=(A, B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [A, C])
self.assertRelated(C, [A, B])
def test_multiple_nested_bases(self):
A = self.create_model("A")
B = self.create_model("B")
C = self.create_model("C", bases=(A, B,))
D = self.create_model("D")
E = self.create_model("E", bases=(D,))
F = self.create_model("F", bases=(C, E,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, C, D, E, F])
self.assertRelated(B, [A, C, D, E, F])
self.assertRelated(C, [A, B, D, E, F])
self.assertRelated(D, [A, B, C, E, F])
self.assertRelated(E, [A, B, C, D, F])
self.assertRelated(F, [A, B, C, D, E])
self.assertRelated(Y, [Z])
self.assertRelated(Z, [Y])
def test_base_to_base_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('Y')])
B = self.create_model("B", bases=(A,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, Y, Z])
self.assertRelated(B, [A, Y, Z])
self.assertRelated(Y, [A, B, Z])
self.assertRelated(Z, [A, B, Y])
def test_base_to_subclass_fk(self):
A = self.create_model("A", foreign_keys=[models.ForeignKey('Z')])
B = self.create_model("B", bases=(A,))
Y = self.create_model("Y")
Z = self.create_model("Z", bases=(Y,))
self.assertRelated(A, [B, Y, Z])
self.assertRelated(B, [A, Y, Z])
self.assertRelated(Y, [A, B, Z])
self.assertRelated(Z, [A, B, Y])
def test_direct_m2m(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B')])
B = self.create_model("B")
self.assertRelated(A, [A.a_1.rel.through, B])
self.assertRelated(B, [A, A.a_1.rel.through])
def test_direct_m2m_self(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('A')])
self.assertRelated(A, [A.a_1.rel.through])
def test_intermediate_m2m_self(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('A', through='T')])
T = self.create_model("T", foreign_keys=[models.ForeignKey('A'), models.ForeignKey('A')])
self.assertRelated(A, [T])
self.assertRelated(T, [A])
def test_intermediate_m2m(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
T = self.create_model("T", foreign_keys=[models.ForeignKey('A'), models.ForeignKey('B')])
self.assertRelated(A, [B, T])
self.assertRelated(B, [A, T])
self.assertRelated(T, [A, B])
def test_intermediate_m2m_extern_fk(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
Z = self.create_model("Z")
T = self.create_model("T", foreign_keys=[
models.ForeignKey('A'), models.ForeignKey('B'), models.ForeignKey('Z'),
])
self.assertRelated(A, [B, T, Z])
self.assertRelated(B, [A, T, Z])
self.assertRelated(T, [A, B, Z])
self.assertRelated(Z, [A, B, T])
def test_intermediate_m2m_base(self):
A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
B = self.create_model("B")
S = self.create_model("S")
T = self.create_model("T", foreign_keys=[models.ForeignKey('A'), models.ForeignKey('B')], bases=(S,))
self.assertRelated(A, [B, S, T])
self.assertRelated(B, [A, S, T])
self.assertRelated(S, [A, B, T])
self.assertRelated(T, [A, B, S])
def test_abstract_base(self):
A = self.create_model("A", abstract=True)
B = self.create_model("B", bases=(A,))
self.assertRelated(A, [B])
self.assertRelated(B, [])
def test_nested_abstract_base(self):
A = self.create_model("A", abstract=True)
B = self.create_model("B", bases=(A,), abstract=True)
C = self.create_model("C", bases=(B,))
self.assertRelated(A, [B, C])
self.assertRelated(B, [C])
self.assertRelated(C, [])
def test_proxy_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
self.assertRelated(A, [B])
self.assertRelated(B, [])
def test_nested_proxy_base(self):
A = self.create_model("A")
B = self.create_model("B", bases=(A,), proxy=True)
C = self.create_model("C", bases=(B,), proxy=True)
self.assertRelated(A, [B, C])
self.assertRelated(B, [C])
self.assertRelated(C, [])
def test_multiple_mixed_bases(self):
A = self.create_model("A", abstract=True)
M = self.create_model("M")
P = self.create_model("P")
Q = self.create_model("Q", bases=(P,), proxy=True)
Z = self.create_model("Z", bases=(A, M, Q))
# Z has a pointer O2O field p_ptr to P
self.assertRelated(A, [M, P, Q, Z])
self.assertRelated(M, [P, Q, Z])
self.assertRelated(P, [M, Q, Z])
self.assertRelated(Q, [M, P, Z])
self.assertRelated(Z, [M, P, Q])
| bsd-3-clause |
particl/particl-core | test/functional/feature_includeconf.py | 2 | 4066 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
# Create additional config files
# - tmpdir/node0/relative.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative\n")
# - tmpdir/node0/relative2.conf
with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
f.write("uacomment=relative2\n")
with open(os.path.join(self.options.tmpdir, "node0", "particl.conf"), "a", encoding='utf8') as f:
f.write("uacomment=main\nincludeconf=relative.conf\n")
def run_test(self):
self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.log.info("-includeconf cannot be used as command-line arg")
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error: Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative)/")
self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
self.log.info("-includeconf cannot contain invalid arg")
# Commented out as long as we ignore invalid arguments in configuration files
#with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# f.write("foo=bar\n")
#self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Invalid configuration value foo")
self.log.info("-includeconf cannot be invalid path")
os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
self.nodes[0].assert_start_raises_init_error(expected_msg="Error: Error reading configuration file: Failed to include configuration file relative.conf")
self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
# Restore initial file contents
f.write("uacomment=relative\n")
with open(os.path.join(self.options.tmpdir, "node0", "particl.conf"), "a", encoding='utf8') as f:
f.write("includeconf=relative2.conf\n")
self.start_node(0)
subversion = self.nodes[0].getnetworkinfo()["subversion"]
assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| mit |
algorhythms/LeetCode | 917 Reverse Only Letters.py | 1 | 1071 | #!/usr/bin/python3
"""
Given a string S, return the "reversed" string where all characters that are not
a letter stay in the same place, and all letters reverse their positions.
Example 1:
Input: "ab-cd"
Output: "dc-ba"
Example 2:
Input: "a-bC-dEf-ghIj"
Output: "j-Ih-gfE-dCba"
Example 3:
Input: "Test1ng-Leet=code-Q!"
Output: "Qedo1ct-eeLg=ntse-T!"
Note:
S.length <= 100
33 <= S[i].ASCIIcode <= 122
S doesn't contain \ or "
"""
class Solution:
def reverseOnlyLetters(self, S: str) -> str:
lst = list(S)
i = 0
n = len(lst)
j = n - 1
while True:
while i < n and not lst[i].isalpha():
i += 1
while j >= 0 and not lst[j].isalpha():
j -= 1
if i < j and i < n and j >= 0:
lst[i], lst[j] = lst[j], lst[i]
i += 1
j -= 1
else:
break
return "".join(lst)
if __name__ == "__main__":
assert Solution().reverseOnlyLetters("Test1ng-Leet=code-Q!") == "Qedo1ct-eeLg=ntse-T!"
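# Additional checks taken from the examples in the problem statement above.
assert Solution().reverseOnlyLetters("ab-cd") == "dc-ba"
assert Solution().reverseOnlyLetters("a-bC-dEf-ghIj") == "j-Ih-gfE-dCba"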
| mit |
Phil-LiDAR2-Geonode/pl2-geonode | geonode/base/translation.py | 35 | 1147 | from modeltranslation.translator import translator, TranslationOptions
from geonode.base.models import (TopicCategory, SpatialRepresentationType, Region,
RestrictionCodeType, License, ResourceBase)
class TopicCategoryTranslationOptions(TranslationOptions):
fields = ('description', 'gn_description',)
class SpatialRepresentationTypeTranslationOptions(TranslationOptions):
fields = ('description', 'gn_description',)
class RegionTranslationOptions(TranslationOptions):
fields = ('name',)
class RestrictionCodeTypeTranslationOptions(TranslationOptions):
fields = ('description', 'gn_description',)
class LicenseTranslationOptions(TranslationOptions):
fields = ('name', 'description', 'license_text',)
translator.register(TopicCategory, TopicCategoryTranslationOptions)
translator.register(SpatialRepresentationType, SpatialRepresentationTypeTranslationOptions)
translator.register(Region, RegionTranslationOptions)
translator.register(RestrictionCodeType, RestrictionCodeTypeTranslationOptions)
translator.register(License, LicenseTranslationOptions)
translator.register(ResourceBase)
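# Illustrative note (not part of the original source): once registered with
# django-modeltranslation, each translated field gains per-language attributes.
# Assuming LANGUAGES includes 'en' and 'fr', a TopicCategory instance exposes
# e.g. `description_en` and `description_fr` alongside the plain `description`:
#
#   category = TopicCategory.objects.first()
#   category.description_fr = "Catégorie de base"
#   category.save()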
| gpl-3.0 |
NeovaHealth/odoo | addons/report_intrastat/__init__.py | 377 | 1079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_intrastat
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arifsetiawan/configuration | playbooks/edx-east/lifecycle_inventory.py | 47 | 2776 | #!/usr/bin/env python
"""
Build an ansible inventory based on autoscaling group instance lifecycle state.
Outputs JSON to stdout with keys for each state and combination of autoscaling
group and state.
{
"InService": [
"10.0.47.127",
"10.0.46.174"
],
"Terminating:Wait": [
"10.0.48.104"
],
"e-d-CommonClusterServerAsGroup": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_InService": [
"10.0.47.127",
"10.0.46.174"
],
"e-d-CommonClusterServerAsGroup_InService": [
"10.0.48.104"
]
}
"""
import argparse
import boto
import json
from collections import defaultdict
class LifecycleInventory():
profile = None
def __init__(self, profile):
parser = argparse.ArgumentParser()
self.profile = profile
def get_e_d_from_tags(self, group):
environment = "default_environment"
deployment = "default_deployment"
for r in group.tags:
if r.key == "environment":
environment = r.value
elif r.key == "deployment":
deployment = r.value
return environment,deployment
def get_instance_dict(self):
ec2 = boto.connect_ec2(profile_name=self.profile)
reservations = ec2.get_all_instances()
dict = {}
for instance in [i for r in reservations for i in r.instances]:
dict[instance.id] = instance
return dict
def run(self):
autoscale = boto.connect_autoscale(profile_name=self.profile)
groups = autoscale.get_all_groups()
instances = self.get_instance_dict()
inventory = defaultdict(list)
for group in groups:
for instance in group.instances:
private_ip_address = instances[instance.instance_id].private_ip_address
if private_ip_address:
environment,deployment = self.get_e_d_from_tags(group)
inventory[environment + "_" + deployment + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[group.name].append(private_ip_address)
inventory[group.name + "_" + instance.lifecycle_state.replace(":","_")].append(private_ip_address)
inventory[instance.lifecycle_state.replace(":","_")].append(private_ip_address)
print json.dumps(inventory, sort_keys=True, indent=2)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--profile', help='The aws profile to use when connecting.')
parser.add_argument('-l', '--list', help='Ansible passes this, we ignore it.', action='store_true', default=True)
args = parser.parse_args()
LifecycleInventory(args.profile).run()
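# Illustrative invocation (not part of the original source; the profile name
# is an assumption):
#
#   python lifecycle_inventory.py --profile my-aws-profile --list
#
# This prints the JSON mapping shown in the module docstring to stdout, which
# is what ansible consumes when the script is used as a dynamic inventory.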
| agpl-3.0 |
Yong-Lee/django | tests/template_tests/filter_tests/test_timeuntil.py | 207 | 4426 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.template.defaultfilters import timeuntil_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeuntilTests(TimezoneTestCase):
# Default compare with datetime.now()
@setup({'timeuntil01': '{{ a|timeuntil }}'})
def test_timeuntil01(self):
output = self.engine.render_to_string('timeuntil01', {'a': datetime.now() + timedelta(minutes=2, seconds=10)})
self.assertEqual(output, '2\xa0minutes')
@setup({'timeuntil02': '{{ a|timeuntil }}'})
def test_timeuntil02(self):
output = self.engine.render_to_string('timeuntil02', {'a': (datetime.now() + timedelta(days=1, seconds=10))})
self.assertEqual(output, '1\xa0day')
@setup({'timeuntil03': '{{ a|timeuntil }}'})
def test_timeuntil03(self):
output = self.engine.render_to_string('timeuntil03', {'a': (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))})
self.assertEqual(output, '8\xa0hours, 10\xa0minutes')
# Compare to a given parameter
@setup({'timeuntil04': '{{ a|timeuntil:b }}'})
def test_timeuntil04(self):
output = self.engine.render_to_string(
'timeuntil04',
{'a': self.now - timedelta(days=1), 'b': self.now - timedelta(days=2)},
)
self.assertEqual(output, '1\xa0day')
@setup({'timeuntil05': '{{ a|timeuntil:b }}'})
def test_timeuntil05(self):
output = self.engine.render_to_string(
'timeuntil05',
{'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=2, minutes=1)},
)
self.assertEqual(output, '1\xa0minute')
# Regression for #7443
@setup({'timeuntil06': '{{ earlier|timeuntil }}'})
def test_timeuntil06(self):
output = self.engine.render_to_string('timeuntil06', {'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil07': '{{ earlier|timeuntil:now }}'})
def test_timeuntil07(self):
output = self.engine.render_to_string('timeuntil07', {'now': self.now, 'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil08': '{{ later|timeuntil }}'})
def test_timeuntil08(self):
output = self.engine.render_to_string('timeuntil08', {'later': self.now + timedelta(days=7, hours=1)})
self.assertEqual(output, '1\xa0week')
@setup({'timeuntil09': '{{ later|timeuntil:now }}'})
def test_timeuntil09(self):
output = self.engine.render_to_string('timeuntil09', {'now': self.now, 'later': self.now + timedelta(days=7)})
self.assertEqual(output, '1\xa0week')
# Ensures that differing timezones are calculated correctly.
@requires_tz_support
@setup({'timeuntil10': '{{ a|timeuntil }}'})
def test_timeuntil10(self):
output = self.engine.render_to_string('timeuntil10', {'a': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
@requires_tz_support
@setup({'timeuntil11': '{{ a|timeuntil }}'})
def test_timeuntil11(self):
output = self.engine.render_to_string('timeuntil11', {'a': self.now_tz_i})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil12': '{{ a|timeuntil:b }}'})
def test_timeuntil12(self):
output = self.engine.render_to_string('timeuntil12', {'a': self.now_tz_i, 'b': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
# Regression for #9065 (two date objects).
@setup({'timeuntil13': '{{ a|timeuntil:b }}'})
def test_timeuntil13(self):
output = self.engine.render_to_string('timeuntil13', {'a': self.today, 'b': self.today})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil14': '{{ a|timeuntil:b }}'})
def test_timeuntil14(self):
output = self.engine.render_to_string('timeuntil14', {'a': self.today, 'b': self.today - timedelta(hours=24)})
self.assertEqual(output, '1\xa0day')
class FunctionTests(SimpleTestCase):
def test_until_now(self):
self.assertEqual(timeuntil_filter(datetime.now() + timedelta(1, 1)), '1\xa0day')
def test_explicit_date(self):
self.assertEqual(timeuntil_filter(datetime(2005, 12, 30), datetime(2005, 12, 29)), '1\xa0day')
| bsd-3-clause |
josyb/myhdl | myhdl/_block.py | 4 | 12779 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2016 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Block with the @block decorator function. """
import inspect
#from functools import wraps
import functools
import myhdl
from myhdl import BlockError, BlockInstanceError, Cosimulation
from myhdl._instance import _Instantiator
from myhdl._util import _flatten
from myhdl._extractHierarchy import (_makeMemInfo,
_UserVerilogCode, _UserVhdlCode,
_UserVerilogInstance, _UserVhdlInstance)
from myhdl._Signal import _Signal, _isListOfSigs
from weakref import WeakValueDictionary
class _error:
pass
_error.ArgType = "%s: A block should return block or instantiator objects"
_error.InstanceError = "%s: subblock %s should be encapsulated in a block decorator"
class _CallInfo(object):
def __init__(self, name, modctxt, symdict):
self.name = name
self.modctxt = modctxt
self.symdict = symdict
def _getCallInfo():
"""Get info on the caller of a BlockInstance.
A BlockInstance should be used in a block context.
This function gets the required info from the caller
It uses the frame stack:
0: this function
1: block instance constructor
2: the decorator function call
3: the function that defines instances
4: the caller of the block function, e.g. a BlockInstance.
"""
stack = inspect.stack()
# caller may be undefined if instantiation from a Python module
callerrec = None
funcrec = stack[3]
name = funcrec[3]
if len(stack) > 4:
callerrec = stack[4]
# special case for list comprehension's extra scope in PY3
if name == '<listcomp>':
funcrec = stack[4]
if len(stack) > 5:
callerrec = stack[5]
name = funcrec[3]
frame = funcrec[0]
symdict = dict(frame.f_globals)
symdict.update(frame.f_locals)
modctxt = False
if callerrec is not None:
f_locals = callerrec[0].f_locals
if 'self' in f_locals:
modctxt = isinstance(f_locals['self'], _Block)
return _CallInfo(name, modctxt, symdict)
### I don't think this is the right place for uniqueifying the name.
### This seems to me to be a conversion concern, not a block concern, and
### there should not be the corresponding global state to be maintained here.
### The name should be whatever it is, which is then uniqueified at
### conversion time. Perhaps this happens already (FIXME - check and fix)
### ~ H Gomersall 24/11/2017
_inst_name_set = set()
_name_set = set()
def _uniqueify_name(proposed_name):
'''Creates a unique block name from the proposed name by appending
a suitable number to the end. Every name this function returns is
assumed to be used, so will not be returned again.
'''
n = 0
while proposed_name in _name_set:
proposed_name = proposed_name + '_' + str(n)
n += 1
_name_set.add(proposed_name)
return proposed_name
class _bound_function_wrapper(object):
def __init__(self, bound_func, srcfile, srcline):
self.srcfile = srcfile
self.srcline = srcline
self.bound_func = bound_func
functools.update_wrapper(self, bound_func)
self.calls = 0
# register the block
myhdl._simulator._blocks.append(self)
self.name_prefix = None
self.name = None
def __call__(self, *args, **kwargs):
name = (
self.name_prefix + '_' + self.bound_func.__name__ +
str(self.calls))
self.calls += 1
# See concerns above about uniqueifying
name = _uniqueify_name(name)
return _Block(self.bound_func, self, name, self.srcfile,
self.srcline, *args, **kwargs)
class block(object):
def __init__(self, func):
self.srcfile = inspect.getsourcefile(func)
self.srcline = inspect.getsourcelines(func)[0]
self.func = func
functools.update_wrapper(self, func)
self.calls = 0
self.name = None
# register the block
myhdl._simulator._blocks.append(self)
self.bound_functions = WeakValueDictionary()
def __get__(self, instance, owner):
bound_key = (id(instance), id(owner))
if bound_key not in self.bound_functions:
bound_func = self.func.__get__(instance, owner)
function_wrapper = _bound_function_wrapper(
bound_func, self.srcfile, self.srcline)
self.bound_functions[bound_key] = function_wrapper
proposed_inst_name = owner.__name__ + '0'
n = 1
while proposed_inst_name in _inst_name_set:
proposed_inst_name = owner.__name__ + str(n)
n += 1
function_wrapper.name_prefix = proposed_inst_name
_inst_name_set.add(proposed_inst_name)
else:
function_wrapper = self.bound_functions[bound_key]
bound_func = self.bound_functions[bound_key]
return function_wrapper
def __call__(self, *args, **kwargs):
name = self.func.__name__ + str(self.calls)
self.calls += 1
# See concerns above about uniqueifying
name = _uniqueify_name(name)
return _Block(self.func, self, name, self.srcfile,
self.srcline, *args, **kwargs)
class _Block(object):
def __init__(self, func, deco, name, srcfile, srcline, *args, **kwargs):
calls = deco.calls
self.func = func
self.args = args
self.kwargs = kwargs
self.__doc__ = func.__doc__
callinfo = _getCallInfo()
self.callinfo = callinfo
self.modctxt = callinfo.modctxt
self.callername = callinfo.name
self.symdict = None
self.sigdict = {}
self.memdict = {}
self.name = self.__name__ = name
# flatten, but keep BlockInstance objects
self.subs = _flatten(func(*args, **kwargs))
self._verifySubs()
self._updateNamespaces()
self.verilog_code = self.vhdl_code = None
self.sim = None
if hasattr(deco, 'verilog_code'):
self.verilog_code = _UserVerilogCode(deco.verilog_code, self.symdict, func.__name__,
func, srcfile, srcline)
if hasattr(deco, 'vhdl_code'):
self.vhdl_code = _UserVhdlCode(deco.vhdl_code, self.symdict, func.__name__,
func, srcfile, srcline)
if hasattr(deco, 'verilog_instance'):
            self.verilog_code = _UserVerilogInstance(deco.verilog_instance, self.symdict, func.__name__,
func, srcfile, srcline)
if hasattr(deco, 'vhdl_instance'):
self.vhdl_code = _UserVhdlInstance(deco.vhdl_instance, self.symdict, func.__name__,
func, srcfile, srcline)
self._config_sim = {'trace': False}
def _verifySubs(self):
for inst in self.subs:
if not isinstance(inst, (_Block, _Instantiator, Cosimulation)):
raise BlockError(_error.ArgType % (self.name,))
if isinstance(inst, (_Block, _Instantiator)):
if not inst.modctxt:
raise BlockError(_error.InstanceError % (self.name, inst.callername))
def _updateNamespaces(self):
# dicts to keep track of objects used in Instantiator objects
usedsigdict = {}
usedlosdict = {}
for inst in self.subs:
# the symdict of a block instance is defined by
# the call context of its instantiations
if isinstance(inst, Cosimulation):
continue # ignore
if self.symdict is None:
self.symdict = inst.callinfo.symdict
if isinstance(inst, _Instantiator):
usedsigdict.update(inst.sigdict)
usedlosdict.update(inst.losdict)
if self.symdict is None:
self.symdict = {}
# Special case: due to attribute reference transformation, the
# sigdict and losdict from Instantiator objects may contain new
# references. Therefore, update the symdict with them.
# To be revisited.
self.symdict.update(usedsigdict)
self.symdict.update(usedlosdict)
# Infer sigdict and memdict, with compatibility patches from _extractHierarchy
for n, v in self.symdict.items():
if isinstance(v, _Signal):
self.sigdict[n] = v
if n in usedsigdict:
v._markUsed()
if _isListOfSigs(v):
m = _makeMemInfo(v)
self.memdict[n] = m
if n in usedlosdict:
m._used = True
def _inferInterface(self):
from myhdl.conversion._analyze import _analyzeTopFunc
intf = _analyzeTopFunc(self.func, *self.args, **self.kwargs)
self.argnames = intf.argnames
self.argdict = intf.argdict
# Public methods
    # The purpose now is to define the API, optimizations later
def _clear(self):
""" Clear a number of 'global' attributes.
This is a workaround function for cleaning up before converts.
"""
        # workaround: elaborate again for the side effect on signal attributes
self.func(*self.args, **self.kwargs)
# reset number of calls in all blocks
for b in myhdl._simulator._blocks:
b.calls = 0
def verify_convert(self):
self._clear()
return myhdl.conversion.verify(self)
def analyze_convert(self):
self._clear()
return myhdl.conversion.analyze(self)
def convert(self, hdl='Verilog', **kwargs):
"""Converts this BlockInstance to another HDL
Args:
hdl (Optional[str]): Target HDL. Defaults to Verilog
path (Optional[str]): Destination folder. Defaults to current
working dir.
name (Optional[str]): Module and output file name. Defaults to
`self.mod.__name__`
trace(Optional[bool]): Verilog only. Whether the testbench should
dump all signal waveforms. Defaults to False.
testbench (Optional[bool]): Verilog only. Specifies whether a
testbench should be created. Defaults to True.
timescale(Optional[str]): Verilog only. Defaults to '1ns/10ps'
"""
self._clear()
if hdl.lower() == 'vhdl':
converter = myhdl.conversion._toVHDL.toVHDL
elif hdl.lower() == 'verilog':
converter = myhdl.conversion._toVerilog.toVerilog
else:
raise BlockInstanceError('unknown hdl %s' % hdl)
conv_attrs = {}
if 'name' in kwargs:
conv_attrs['name'] = kwargs.pop('name')
conv_attrs['directory'] = kwargs.pop('path', '')
if hdl.lower() == 'verilog':
conv_attrs['no_testbench'] = not kwargs.pop('testbench', True)
conv_attrs['timescale'] = kwargs.pop('timescale', '1ns/10ps')
conv_attrs['trace'] = kwargs.pop('trace', False)
conv_attrs.update(kwargs)
for k, v in conv_attrs.items():
setattr(converter, k, v)
return converter(self)
def config_sim(self, trace=False, **kwargs) :
self._config_sim['trace'] = trace
if trace:
for k, v in kwargs.items() :
setattr(myhdl.traceSignals, k, v)
myhdl.traceSignals(self)
def run_sim(self, duration=None, quiet=0):
if self.sim is None:
sim = self
#if self._config_sim['trace']:
# sim = myhdl.traceSignals(self)
self.sim = myhdl._Simulation.Simulation(sim)
self.sim.run(duration, quiet)
def quit_sim(self):
if self.sim is not None:
self.sim.quit()
| lgpl-2.1 |
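For context on how the decorator defined above is used, here is a minimal sketch of a small design wrapped with @block and converted through the _Block.convert() method documented earlier. The incrementer design is illustrative only and is not part of this repository:

from myhdl import block, always, Signal, intbv

@block
def inc(clk, count):
    # Each call of the decorated function returns a _Block instance.
    @always(clk.posedge)
    def logic():
        count.next = (count + 1) % 256
    return logic

clk = Signal(bool(0))
count = Signal(intbv(0, min=0, max=256))
top = inc(clk, count)        # a _Block instance (see class _Block above)
top.convert(hdl='Verilog')   # emits Verilog via _Block.convert()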
mattbierbaum/tmper | tmper/progress.py | 2 | 5026 | from __future__ import print_function
import sys
import time
import math
import numpy as np  # used by the moving-average time estimate in _estimate_time()
class ProgressBar:
def __init__(self, num, label='Progress', value=0, screen=79,
time_remaining=False, bar=True, bar_symbol='=', bar_caps='[]',
bar_decimals=2, display=True):
"""
ProgressBar class which creates a dynamic ASCII progress bar of two
different varieties:
1) A bar chart that looks like the following:
``Progress [================ ] 63.00%``
2) A simple number completed look:
``Progress : 17 / 289``
Parameters
-----------
num : integer
The number of tasks that need to be completed
label : string [default: 'Progress']
The label for this particular progress indicator,
value : integer [default: 0]
Starting value
screen : integer [default: 79]
Size the screen to use for the progress bar
        time_remaining : boolean [default: False]
Display estimated time remaining
bar : boolean [default: True]
Whether or not to display the bar chart
bar_symbol : char [default: '=']
The character to use to fill in the bar chart
bar_caps : string [default: '[]']
            Characters to use as the end caps of the bar. The string will be split in
half and each half put on a side of the chart
bar_decimals : integer [default: 2]
Number of decimal places to include in the percentage
display : boolean [default: True]
a crutch so that we don't have a lot of ``if``s later. display
or don't display the progress bar
"""
# TODO -- add estimated time remaining
self.num = num
self.value = value
self._percent = 0
self.time_remaining = time_remaining
self._deltas = []
self.display = display
self.label = label
self.bar = bar
self._bar_symbol = bar_symbol
self._bar_caps = bar_caps
self._decimals = bar_decimals
self.screen = screen
if len(self._bar_caps) % 2 != 0:
raise AttributeError("End caps must be even number of symbols")
if self.bar:
# 3 digit _percent + decimal places + '.'
self._numsize = 3 + self._decimals + 1
# end caps size calculation
self._cap_len = len(self._bar_caps)//2
self._capl = self._bar_caps[:self._cap_len]
self._capr = self._bar_caps[self._cap_len:]
# time remaining calculation for space
self._time_space = 11 if self.time_remaining else 0
# the space available for the progress bar is
# 79 (screen) - (label) - (number) - 2 ([]) - 2 (space) - 1 (%)
self._barsize = (
self.screen - len(self.label) - self._numsize -
len(self._bar_caps) - 2 - 1 - self._time_space
)
self._formatstr = '\r{label} {_capl}{_bars:<{_barsize}}{_capr} {_percent:>{_numsize}.{_decimals}f}%'
self._percent = 0
self._dt = '--:--:--'
self._bars = ''
if self.time_remaining:
self._formatstr += " ({_dt})"
else:
self._digits = str(int(math.ceil(math.log10(self.num))))
self._formatstr = '\r{label} : {value:>{_digits}} / {num:>{_digits}}'
self._dt = '--:--:--'
if self.time_remaining:
self._formatstr += " ({_dt})"
self.update()
def _estimate_time(self):
if len(self._deltas) < 3:
self._dt = '--:--:--'
else:
dt = np.diff(self._deltas[-25:]).mean() * (self.num - self.value)
self._dt = time.strftime('%H:%M:%S', time.gmtime(dt))
def _draw(self):
""" Interal draw method, simply prints to screen """
if self.display:
print(self._formatstr.format(**self.__dict__), end='')
sys.stdout.flush()
def increment(self):
self.update(self.value + 1)
def update(self, value=0):
"""
Update the value of the progress and update progress bar.
Parameters
-----------
value : integer
The current iteration of the progress
"""
self._deltas.append(time.time())
self.value = value
self._percent = 100.0 * self.value / self.num
if self.bar:
self._bars = self._bar_symbol*int(round(self._percent / 100. * self._barsize))
if (len(self._deltas) < 2) or (self._deltas[-1] - self._deltas[-2]) > 1e-1:
if self.time_remaining:
self._estimate_time()
self._draw()
if self.value == self.num:
self.end()
def end(self):
if self.display:
print('\r{lett:>{screen}}\r'.format(**{'lett':'', 'screen': self.screen}), end='')
| mit |
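A short usage sketch for the ProgressBar class above; the task counts and sleep are made up for illustration:

import time

# Bar-style indicator, e.g. "Progress [========          ]  42.00%"
pb = ProgressBar(100, label='Progress', bar=True)
for _ in range(100):
    time.sleep(0.01)   # stand-in for real work
    pb.increment()     # update(value + 1): recomputes the percentage and redraws

# Counter-style indicator, e.g. "Copied :  17 / 289"
pb = ProgressBar(289, label='Copied', bar=False)
for _ in range(289):
    pb.increment()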
e-mission/e-mission-server | emission/tests/coreTests/TestBase.py | 2 | 6423 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Test the base class enhancements over AttrDict
# Since the base class should not really contain any properties, we create a
# dummy subclass here and use it for testing.
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import *
import unittest
import enum
# Our imports
import emission.core.wrapper.wrapperbase as ecwb
class TestEnum(enum.Enum):
A = 0
B = 1
C = 2
class TestWrapper(ecwb.WrapperBase):
props = {"a": ecwb.WrapperBase.Access.RO,
"b": ecwb.WrapperBase.Access.RO,
"c": ecwb.WrapperBase.Access.RO,
"WrapperBase": ecwb.WrapperBase.Access.RO,
"invalid": ecwb.WrapperBase.Access.RO,
"valid": ecwb.WrapperBase.Access.RW,
"write_a": ecwb.WrapperBase.Access.RW,
"unset": ecwb.WrapperBase.Access.WORM,
"write_local_dt": ecwb.WrapperBase.Access.WORM}
enums = {'a': TestEnum, 'b': TestEnum, 'write_a': TestEnum}
geojson = []
nullable = ["unset"]
local_dates = ['write_local_dt']
def _populateDependencies(self):
# Add new properties called "invalid" and "valid"
# with values from the input
# here, valid depends upon a and invalid depends upon b. Unfortunately, we cannot just do
        # self.valid = True because that calls the current setattr, and that will
# fail because dependent values are read-only. We can't even use the
# set_attr method of super, since that is WrapperBase and WrapperBase
# checks the "props" of the current class. Instead, we call the
# set_attr method of WrapperBase's parent, which has no checks.
if "a" in self and self.a == TestEnum.B:
super(ecwb.WrapperBase, self).__setattr__("valid", self.a)
if "b" in self and self.b == TestEnum.C:
super(ecwb.WrapperBase, self).__setattr__("invalid", self.b)
class TestBase(unittest.TestCase):
def testCreationABC(self):
test_tw = TestWrapper({'a': 1, 'b': 2, 'c': 3})
self.assertEqual(test_tw.valid, TestEnum.B)
self.assertEqual(test_tw.invalid, TestEnum.C)
self.assertTrue(str(test_tw).startswith("TestWrapper"))
def testCreationAB(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
self.assertEqual(test_tw.valid, TestEnum.B)
with self.assertRaises(AttributeError):
print ("test_tw.invalid = %s" % test_tw.invalid)
self.assertTrue(str(test_tw).startswith("TestWrapper"))
def testSetReadOnly(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
self.assertEqual(test_tw.valid, TestEnum.B)
with self.assertRaisesRegex(AttributeError, ".*read-only.*"):
test_tw.invalid = 2
def testGetSetReadWrite(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
self.assertEqual(test_tw.valid, TestEnum.B)
test_tw.valid = 2
self.assertEqual(test_tw.valid, 2)
def testSetEnumPositive(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
self.assertEqual(test_tw.valid, TestEnum.B)
test_tw.write_a = TestEnum.C
self.assertEqual(test_tw.write_a, TestEnum.C)
self.assertEqual(test_tw["write_a"], 2)
def testSetEnumNegative(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
self.assertEqual(test_tw.valid, TestEnum.B)
with self.assertRaisesRegex(AttributeError, ".*enum.*"):
test_tw.write_a = 2
def testSetInvalid(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
with self.assertRaisesRegex(AttributeError, ".*not defined.*"):
self.assertEqual(test_tw.z, 1)
def testGetReadOnly(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
self.assertEqual(test_tw.a, TestEnum.B)
def testGetInvalid(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
with self.assertRaisesRegex(AttributeError, ".*not defined.*"):
self.assertEqual(test_tw.z, 1)
def testIPythonAutoComplete(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
attributes = dir(test_tw)
self.assertIn("a", attributes)
self.assertIn("c", attributes)
self.assertIn("valid", attributes)
self.assertIn("invalid", attributes)
self.assertIn("b", attributes)
def testFirstTimeWrite(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
# This was originally unset and now we are setting it for the first time, so the write
# succeeds
test_tw.unset = 4
# Now that it is set, it cannot be changed since it is read-only, so an
# attempt to change it causes an exception
with self.assertRaisesRegex(AttributeError, ".*read-only.*"):
test_tw.unset = 5
def testNullable(self):
test_tw = TestWrapper({'a': 1, 'c': 3})
# this is nullable, so returns none if it is not set
self.assertIsNone(test_tw.unset)
# this is not nullable, so throws if not set
with self.assertRaisesRegex(AttributeError, ".*has no attribute.*"):
print("the value of b is %s" % test_tw.b)
# The nested classes are hard to test because they load the wrappers automatically
# from the wrapper directory, and so in order to test them, we either need to:
# - use a module that is already in wrapper, OR
# - create a new test module in wrapper
# Trying to use WrapperBase for now to test. If that doesn't work, we will
# switch to something else once we really have it.
def testNestedClass(self):
test_tw = TestWrapper({'a': 1, 'c': 3, 'WrapperBase': {'a': 11, 'c': 13}})
# self.assertEqual(test_tw.WrapperBase.a, 11)
def testLocalDate(self):
import emission.core.wrapper.localdate as ecwl
test_local = TestWrapper({'a': 1, 'c': 3})
test_local.write_local_dt = ecwl.LocalDate({'year': 2016, 'month': 4})
self.assertEqual(test_local.write_local_dt.year, 2016)
self.assertEqual(test_local.write_local_dt.month, 4)
with self.assertRaisesRegex(AttributeError, ".*has no attribute.*"):
print("the value of day is %s" % test_local.write_local_dt.day)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
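To summarize the access semantics exercised piecewise by the tests above, a compact sketch using the TestWrapper defined in this file (the values are arbitrary):

tw = TestWrapper({'a': 1, 'c': 3})   # 'a' reads back as TestEnum.B (enum-mapped, read-only)
tw.valid = 2                         # Access.RW: reads and writes are allowed
tw.unset = 4                         # Access.WORM: the first write succeeds...
try:
    tw.unset = 5                     # ...but any later write raises AttributeError (read-only)
except AttributeError:
    pass
try:
    tw.c = 0                         # Access.RO: plain read-only property, writes raise
except AttributeError:
    pass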
raj454raj/eden | languages/ar.py | 7 | 143913 | # -*- coding: utf-8 -*-
{
'* Required Fields': 'الحقول المطلوبة *',
'# selected': '# selected',
'%(GRN)s Number': 'رقم مستند الادخال %(GRN)s',
'%(GRN)s Status': 'حالة %(GRN)s',
'%(PO)s Number': 'رقم طلبيه الشراء %(PO)s',
'%(REQ)s Number': 'رقم الطلبيات %(REQ)s',
'1 Assessment': 'التقييم1',
'2 different options are provided here currently:': 'يوجد خيارين مختلفين متوفرين هنا حاليا:',
'3W Report': 'تقرير',
'4-7 days': '7-4 أيام',
'8-14 days': '14-8 يوم',
'A brief description of the group (optional)': 'وصف موجز للمجموعة (اختياري)',
'A catalog of different Assessment Templates including summary information': 'فهرس نماذج التقيمات المختلفة وتتظمن ملخص للمعلومات',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'تم تحميل الملف من نظام تحديد المواقع الشامل(GPS) الذي يحتوي على سلسلة من النقاط الجغرافية في شكل أكس أم أل(XML).',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'ملف بتنسيق GPX مأخوذ من نظام GPS الذي يمكن أن ترتبط طوابعه الزمنية مع الطوابع الزمنية على الصور لتحديد مواقعها على الخريطة.',
'A library of digital resources, such as photos, documents and reports': 'مكتبة للموارد الرقمية ، مثل المستندات والصور والتقارير',
'A location group must have at least one member.': 'يجب أن يكون عضو واحد على الأقل في موقع المجموعة.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'وثيقة مرجعية مثل ملف ، عنوان الموقع أوشخص للاتصال به للتحقق من هذه البيانات. يمكنك كتابة أحرف قليلة من اسم وثيقة لتصل إلى مستند موجود.',
'Abbreviation': 'اختصار',
'Ability to customize the list of important facilities needed at a Shelter': 'القدرة على تخصيص قائمة المنشآت الهامة اللازمة في مأوى',
'Ability to Fill Out Surveys': 'إمكانية ملأ المسوحات',
'About': 'المتعلق',
'About Us': 'حولنا',
'ACCESS DATA': 'الوصول إلى البيانات',
'Access to education services': 'الوصول إلى خدمات التعليم',
'Access to Shelter': 'وصول الى المأوى',
'Account registered, however registration is still pending approval - please wait until confirmation received.': 'الحساب مسجل ، إلا أنه التسجيل لا يزال ينتظر الموافقة -- يرجى الانتظار حتى تأكيد استلامها.',
'Acronym': 'اختصار',
"Acronym of the organization's name, eg. IFRC.": 'اختصار اسم المنظمة، على سبيل المثال. IFRC.',
'Actionable by all targeted recipients': 'لا علامات متاحة حاليا',
'Active': 'نشيط',
'Active Problems': 'أحدث المشاكل',
'Activities matching Assessments:': 'التقييمات تطابق النشاطات',
'Activities of boys 13-17yrs before disaster': 'أنشطة الفتيان 13-17 سنة قبل الكارثة',
'Activities of boys 13-17yrs now': 'أنشطة الفتيان 13-17 سنة الآن',
'Activities of boys <12yrs now': 'نشاطات الفتيان <12سنة الآن',
'Activities of children': 'أنشطة الأطفال',
'Activities of girls 13-17yrs before disaster': 'أنشطة الفتيات من 13-17 سنة قبل الكارثة',
'Activity Added': 'النشاط المضاف',
'Activity Reports': 'تقارير النشاط',
'Activity Type': 'نوع النشاط',
'Activity Updated': 'تمت تحديث النشاط',
'Add': 'أضافة',
'Add a new certificate to the catalog.': 'إضافة شهادة جديدة إلى الكتالوج.',
'Add a new competency rating to the catalog.': 'إضافة تصنيف جديد للكفاءة إلى الكاتالوج',
'Add a new course to the catalog.': 'إضافة دورة جديدة للمصنف.',
'Add a new job role to the catalog.': 'إضافة دور وظيفة جديدة إلى الكتالوج.',
'Add a new skill provision to the catalog.': 'إضافة مهارات جديدة إلى الكتالوج.',
'Add Address': 'إضافة عنوان جديد',
'Add Alternative Item': 'إضافة عنصر بديل',
'Add Assessment Summary': 'إضافة ملخص التقييم',
'Add Baseline': 'إضافة خط قاعدي',
'Add Certificate for Course': 'اظافة شهادة للدورة',
'Add Certification': 'اضافه شهادة',
'Add Credential': 'أضف االاعتمادات',
'Add Credentials': 'أضف أوراق اعتماد',
'Add Demographic': 'اضافة التوزيع السكاني',
'Add Education': 'أضافة مستوى التعليم',
'Add Education Level': 'اضافة مستوى تعليمي جديد',
'Add Experience': 'أضافة خبرة جديدة',
'Add Human Resource': 'أضف موارد بشرية',
'Add Identity': 'إضافة هوية جديدة',
'Add Image': 'أضف صورة',
'Add Item': 'أضف العنصر',
'Add Item to Commitment ': 'إضافة عنصر إلى الإلتزام',
'Add Item to Inventory': 'إضافة عنصر الى الجرد',
'Add Job Role': 'أضف الدور الوظيفي',
'Add Level 1 Assessment': 'أضف تقييم المستوى 1',
'Add Location Group': 'إضافة موقع المجموعة',
'Add Membership': 'أضف عضوية',
'Add Need': 'أضف حاجة',
'Add New Assessment Summary': 'إضافة ملخص تقييم جديد',
'Add New Budget': 'أضف ميزانية جديدة',
'Add New Donor': 'إضافة مانح جديد',
'Add New Human Resource': 'إضافة موارد بشرية جديدة',
'Add New Impact': 'إضافة تأثير جديد',
'Add New Level 1 Assessment': 'إضافة تقييم جديد للمستوى 1',
'Add New Level 2 Assessment': 'أضف تقييم جديد للمستوى2',
'Add New Need': 'إضافة حاجة جديدة',
'Add New Need Type': 'إضافة نوع جديد من الإحتياجات',
'Add new project.': 'إضافة مشروع جديد.',
'Add New River': 'أضف نهرا جديدا',
'Add New Scenario': 'إضافة سيناريو جديد',
'Add New Sent Item': 'أضف عنصر مرسل جديد',
'Add New Survey Question': 'اضف سؤال اسطلاعي جديد',
'Add New Track': 'إضافة مسار جديد',
'Add new and manage existing staff.': 'اضافة وادارة الموظفين',
'Add new and manage existing volunteers.': 'اضافة وادارة المتطوعين',
'Add Peer': 'إضافة شخص قرين',
'Add Person': 'إضافة شخص',
'Add Photo': 'أضف صورة',
'Add Population Statistic': 'أضف إحصاء السكان',
'Add Program Hours': 'أضافة ساعات العمل',
'Add Question': 'إضافة سؤال',
'Add Recommendation Letter': 'اضافه شكر وتقدير',
'Add Recommendation Letter Type': 'اظافة نوع شكر وتقدير',
'Add Reference Document': 'أضف وثيقة مرجعية',
'Add Region': 'اضافة منطقة',
'Add Report': 'اضافة تقرير',
'Add Request': 'أضف طلبا',
'Add Skill': 'أضافة مهارة جديدة',
'Add Skill Equivalence': 'أضف مهارة معادلة',
'Add Solution': 'إضافة الحل',
'Add Staff': 'أضف موظفين',
'Add staff members': 'أضف موظفين',
'Add Survey Answer': 'أضف جواب حول الدراسة',
'Add Survey Question': 'إضافة سؤال في الاستطلاع',
'Add Team': 'أضافة فريق',
'Add Ticket': 'أضف تذكرة',
'Add Training': 'إضافة تدريب',
'Add Volunteer Availability': 'أضف توفر متطوعين',
'Added to Group': 'تمت اضافة العضوية',
'Added to Team': 'تمت اضافة العضوية',
'Additional Beds / 24hrs': 'أسرة إضافية / 24 ساعة',
'Address': 'العنوان',
'Address added': 'أُضيف العنوان',
'Address deleted': 'تم حذف العنوان',
'Address Type': 'نوع العنوان',
'Address updated': 'تم تحديث العنوان',
'Adequate': 'المناسب',
'Admin Assistant': 'مساعد المشرف',
'Admin Email': 'البريد الإلكتروني للمشرف',
'Administration': 'الادارة',
'Adult female': 'أنثى بالغة',
'Adult ICU': 'وحدة العناية المركزة المخصصة للبالغين',
'Adult male': 'الذكور البالغين',
'Adult Psychiatric': 'طبيب نفسي للكبار',
'Advanced:': 'المتقدمة:',
'Advisory': 'الاستشارية',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'بعد الضغط على الزر،ستظهر مجموعة من الوحدات المزدوجة واحدة تلو الأخرى.يرجى إختيار حل واحد من كل زوج تفضله عن الآخر',
'Age group does not match actual age.': 'الفئة العمرية لا تطابق السن الفعلي.',
'Agriculture': 'الزراعة',
'Airport': 'Airport',
'Alcohol': 'الكحول',
'All': 'الكل',
'All Resources': 'جميع الموارد',
'Allowed to push': 'يسمح للدفع',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'يسمح بتوسيع الميزانية إعتمادا على تكاليف الموظفين والمعدات ، بما في ذلك أي مشرف على النفقات العامة.',
'Allows authorized users to control which layers are available to the situation map.': 'تتيح للمستخدمين المسموح لهم التحكم بالطبقات التي تتوفر لخريطة الوضع.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'يسمح بإنشاء وإدارة الدراسات لتحديد الأضرار الناتجة عن الكوارث الطبيعية.',
'Alternative Item': 'عنصر بديل',
'Alternative Item Details': 'تفاصيل العناصرالبديلة',
'Alternative Items': 'عناصر بديلة',
'Alternative places for studying': 'أماكن بديلة للدراسة',
'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'يمكن اختيار نموذج لانشاء تقييم للكوارث، ان الاجابات يمكن ان تجمع والنتائج تحلل كجداول، مخططات او خرائظ',
'An item which can be used in place of another item': 'عنصر يمكن استخدامه في مكان عنصرآخر',
'Anthropology': 'أنثروبولوجيا',
'Apparent Age': 'العمرالظاهر',
'Apparent Gender': 'وضوح الجنس',
'Application Deadline': 'أخر موعد للتسجيل',
'Appointment Date': 'تاريخ التعيين',
'Appointment Number': 'رقم التعيين',
'Approve': 'الموافقة على',
'Arabic': 'العربية',
'Areas inspected': 'المناطق الي تم تفقدها',
'Assessment': 'تقييم',
'Assessment deleted': 'حُذف التقييم',
'Assessment Summary added': 'أُضيف ملخص التقييم',
'Assessment Summary deleted': 'تم حذف ملخص التقييم',
'Assessment Templates': 'نماذج التقييم',
'Assessment updated': 'تم تحديث التقييم',
'Assessments': 'تقييم',
'Assessments and Activities': 'التقييمات و النشاطات',
'Assessor': 'المستشار',
'Asset': 'الموجودات الثابتة',
'Asset Details': 'تفاصيل الإمتياز',
'Assets': 'الموجودات الثابتة',
'Assign Staff': 'تعيين الموظفين',
'Assign to Org.': 'تعيين في منظمة.',
'Assign to Organization': 'تعيين في منظمة',
'Assigned': 'تم التعيين',
'assigned': 'تم تعيينه',
'Assigned to Organization': 'تم التعيين في المنظمة',
'ATC-20 Rapid Evaluation modified for New Zealand': '20-ATC تقييم سريع معدل لنيوزيلندا',
'Attachments': 'المرفقات',
'Authentication Required': 'مطلوب المصادقة',
'Author': 'الكاتب',
'Availability': 'توفر',
'Available Beds': 'الأسرة المتاحة',
'Available databases and tables': 'جداول و قواعد البيانات المتوفرة',
'Available Records': 'الوثائق المتاحة',
'Avalanche': 'انهيار ثلجي',
'Bahai': 'بهائي',
'Banana': 'موز',
'Barricades are needed': 'هناك حاجة إلى المتاريس',
'Base Layers': 'طبقات القاعدة',
'Base Location': 'موقع القاعدة',
'Baseline added': 'تم إضافة الخط القاعدي',
'Baseline deleted': 'تم حذف الخطوط القاعدية',
'Baseline number of beds of that type in this unit.': 'رقم الخط القاعدي للأسرَّة لذلك النوع في هذه الوحدة.',
'Baselines': 'الخطوط القاعدية',
'Baselines Details': 'تفاصيل الخطوط القاعدية',
'Basic Details': 'تفاصيل أساسية',
'Baud': 'باود Baud',
'Baud rate to use for your modem - The default is safe for most cases': 'معدل الباود لاستخدام المودم الخاص بك --يعد اي اختيار امن في معظم الحالات',
'Bed Capacity per Unit': 'السعة السريرية لكل وحدة',
'Bed type already registered': 'نوع السرير مسجل سابقا',
'Beneficiaries': 'المستفيدين',
'Beneficiary Report': 'تقرير الستفيد',
'Beneficiary Type': 'انواع المستفيدين',
'Bin': 'سلة (صندوق)',
'blond': 'أشقر',
'Blood Type (AB0)': 'فصيلة الدم (AB0)',
'Blowing Snow': 'هبوب عاصفة ثلجية',
'blue': 'أزرق',
'Bomb': 'قنبلة',
'Bomb Explosion': 'انفجار قنبلة',
'Bomb Threat': 'تهديد القنبلة',
'Border Color for Text blocks': 'لون حاشية مجملالنص',
'Bounding Box Size': 'حجم المربع المحيط',
'Branch': 'فرع',
'Branch Coordinator': 'مدير الفرع',
'Branch Organization Capacity Assessment': 'تقيم قدرة فروع المنظمه',
'Branch Organization Capacity Assessments': 'تقييم قابلية المنظمية الفرعية',
'Branches': 'الفروع',
'Brand': 'العلامة التجارية',
'Brand added': 'تم إضافة العلامة التجارية',
'Brands': 'العلامات',
'Bridge Closed': 'جسر مغلق',
'Bucket': 'دلو',
'Buddhist': 'بوذي',
'Budget': 'الميزانية',
'Budget deleted': 'تم حذف الميزانية',
'Budget Details': 'تفاصيل الميزانية',
'Budget updated': 'تحديث الميزانية',
'Budgeting Module': 'وحدة القيام بالميزانية',
'Building Assessments': 'تقييمات المباني',
'Building Collapsed': 'مبنى ساقط',
'Building Name': 'اسم البناية',
'Building or storey leaning': 'بناء أو طابق مائل',
'Building Short Name/Business Name': 'إسم المبنى/الإسم التجاري',
'Built using the Template agreed by a group of NGOs working together as the': 'تم إنشاؤها باستخدام القالب الذي اتفقت عليه مجموعة من المنظمات غير الحكومية العاملة معا مثل',
'Bundle Contents': 'محتويات الحزمة',
'Bundle deleted': 'تم حذف الحزمة',
'Bundle Details': 'تفاصيل الحزمة',
'Bundle Updated': 'تم تحديث الحزمة',
'Bundles': 'حزم',
'by': 'من طرف',
'Calculate': 'حساب',
'Calendar': 'Calendar',
'Camp': 'مخيم',
'Camp Coordination/Management': 'تنسيق/إدارة المخيم',
'Can only disable 1 record at a time!': 'يمكن تعطيل سجل واحد فقط مرة واحدة',
'Cancel Log Entry': 'Cancel Log Entry',
'Canceled': 'تم الغاؤه',
'Cannot be empty': 'لا يمكن أن تكون فارغة',
'Capture Information on each disaster victim': 'جمع معلومات عن كل ضحية',
'Capturing organizational information of a relief organization and all the projects they have in the region': 'التقاط المعلومات التنظيمية لمساعدة المنظمة وجميع مشاريعهم في المنطقة',
'Casual Labor': 'عمل متقطع',
'Catalog': 'كاتالوج',
'Catalog added': 'أُضيف الكاتالوج',
'Catalog deleted': 'تم حذف الكتالوج',
'Catalog Details': 'تفاصيل الكتالوج',
'Catalog Item deleted': 'تم حذف عنصر من الكتالوج',
'Catalog updated': 'تم تحديث المصنف',
'Catalogs': 'اللوائح',
'Category': 'فئة',
'Ceilings, light fixtures': 'أسقف ، مصابيح',
'Cell Tower': 'Cell Tower',
'Certificate': 'شهادة',
'Certificate added': 'تمت اضافة الشهادة',
'Certificate Catalog': 'سجل الشهادة',
'Certificate deleted': 'تم حذف الشهادة',
'Certificate Details': 'تفاصيل الشهادة',
'Certificate List': 'لائحة الشهادات',
'Certificates': 'الشهادات',
'Certification': 'شهادة',
'Certification added': 'اضيفت الشهادات',
'Certification deleted': 'تم حذف الشهادة',
'Certification Details': 'تفاصيل الشهادة',
'Certification updated': 'تم تحديث الشهادات',
'Certifying Organization': 'ترسيم المنظمة',
'Change Password': 'تغيير كلمة السر',
'Chapter': 'Chapter',
'Check': 'فحص',
'Check Request': 'فحص الطلب',
'Check to delete': 'تحقق قبل الحذف',
'Checklist deleted': 'تم حذف القائمة',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'هجوم أو تهديد انفجاري ذا قدرة عالية أو كيميائي،بيولوجي أواشعاعي',
'Chicken': 'دجاج',
'Child': 'طفل',
'Child (< 18 yrs)': 'طفل (< 18 عاما)',
'Child Abduction Emergency': 'حالة الطوارئ في حالات اختطاف الأطفال',
'Child headed households (<18 yrs)': 'الأسر التي يرأسها أطفال (<18 عاما)',
'Children in juvenile detention': 'الأطفال الذين هم في محتجز الأحداث',
'Children orphaned by the disaster': 'الأطفال الذين تيتموا بسبب الكارثة',
'Children that have been sent to safe places': 'الأطفال الذين تم إرسالهم إلى أماكن آمنة',
'Children who have disappeared since the disaster': 'الأطفال الذين اختفوا منذ وقوع الكارثة',
'Chinese (Taiwan)': 'الصينية (تايوان)',
'Cholera-Treatment-Center': 'مركزعلاج الكوليرا',
'Choose File': 'اختيار مستند',
'Christian': 'مسيحي',
'Church': 'كنيسة',
'City': 'مدينة',
'Cleaner': 'المنظف',
'Clear filter': 'مسح الاعداد',
'click for more details': 'اضغط هنا للمزيد من التفاصيل',
'Click on the link %(url)s to reset your password': 'انقر على الرابط %(url)s to reset your password',
'Click on the link %(url)s to verify your email': 'انقر على الرابط %(url)s للتحقق من بريدك الالكتروني',
'Clinical Operations': 'عمليات سريرية',
'Clinical Status': 'حالة سريرية',
'Close map': 'Close map',
'Closed': 'تم الغلق',
'Clothing': 'ملابس',
'Cloud forecast': 'توقع الغيوم',
'Cluster Subsector': 'كتلة القطاع الفرعي',
'Cluster(s)': 'كتلة(ج)',
'Code': 'كود',
'Cold Wave': 'موجة برد',
'Color of Buttons when hovering': 'لون الأزرار عندما تحوم',
'Color of dropdown menus': 'لون القائمة المنسدلة',
'Color of selected Input fields': 'لون مجال المعلومات المختارة',
'Color of selected menu items': 'لون عناصر القائمة المختارة',
'Combined Method': 'طريقة مركبة',
'Comments': 'التعليقات',
'Commit from %s': 'تسليم حسب %',
'Commit Status': 'حالة الإلتزام.',
'Commiting a changed spreadsheet to the database': 'الإلتزام بجدول التغيرات لقاعدة البيانات',
'Commitment Details': 'تفاصيل الالتزام',
'Commitment Item deleted': 'تم حذف الالتزام',
'Commitment Item Details': 'تفاصيل التزام العنصر',
'Commitment Updated': 'تم تحديث الالتزام',
'Commitments': 'الالتزامات',
'Communication Officer': 'موظف الاتصالات',
'Communities': 'المنظمات',
'Community Contacts': 'الاتصال المجتمع',
'Community Health Center': 'مركز لصحة المجتمع',
'Competencies': 'الكفاءات',
'Competency Details': 'تفاصيل الكفاءة',
'Competency Rating Details': 'تفاصيل تقييم الكفاءة',
'Completed': 'تم',
'completed': 'تم',
'Completion Question': 'الاسئله التكميليه',
'Compose': 'شكّل',
'Condition': 'الشروط',
'Configurations': 'تكوينات',
'Confirm Shipment Received': 'تأكيد تلقى الشحنة',
'Conflict Details': 'تفاصيل الصراع',
'Conflict Resolution': 'حل التضاربات',
'consider': 'يعتبر',
'Consumable': 'مستهلكات',
'Contact': 'اتصال',
'Contact Data': 'بيانات الاتصال',
'Contact details': 'تفاصيل الاتصال',
'Contact Information': 'معلومات الشخص المُتصل به',
'Contact Information Deleted': 'حُذفت معلومات المُتصل به',
'Contact information updated': 'تم تحديث معلومات الاتصال',
'Contact Name': 'اسم جهة الاتصال',
'Contact Phone': 'هاتف الاتصال',
'Contact us': 'اتصل بنا',
'Contacts': 'وسائل الاتصال',
'Contract': 'العقد',
'Contract End Date': 'تاريخ انتهاء العقد',
'Contributor': 'مشارك',
'Conversion Tool': 'أداة تحويل',
'Coordinator': 'المنسق',
'Copy': 'نسخ',
'Cost per Megabyte': 'تكلفة لكل ميغا بايت',
'Cost Type': 'نوع التكلفة',
"couldn't be parsed so NetworkLinks not followed.": 'لا يمكن تحليل ذلك. تعذر تتبع NetworkLinks.',
'Count': 'العد',
'Country': 'البلد',
'Course': 'الدورة',
'Course Certificate updated': 'تم تحديث شهادة الدورة',
'Course Certificates': 'شهادات الدورات',
'Course Details': 'تفاصيل الدورة',
'Create': 'إضافة',
'Create a Person': 'إضافة شخص',
'Create Activity': 'إضافة نشاط',
'Create Assessment': 'اضافه تقييم',
'Create Asset': 'إضافة إمتياز جديد',
'Create Bed Type': 'إضافة نوع السرير',
'Create Brand': 'اضافه علامه تجاريه جديده',
'Create Budget': 'اضافة ميزانية',
'Create Catalog': 'فهرس الدورة',
'Create Certificate': 'اضافة شهادة',
'Create Cholera Treatment Capability Information': 'أضف معلومات حول معالجة الكوليرا',
'Create Course': 'إضافة دورة أو درس',
'Create Dead Body Report': 'أضف تقريرا عن جثة',
'Create Department': 'اضافة قسم',
'Create Event Type': 'اضافة نوع النشاط',
'Create Facility': 'إضافة مرفق',
'Create Group': 'إضافة مجموعة',
'Create Hospital': 'إضافة مستشفى جديد',
'Create Identification Report': 'إضافة تقرير الهوية',
'Create Incident Report': 'اضافة تقرير لحادث',
'Create Incident Type': 'اضافة نوع الحادث',
'Create Item': 'إضافة عنصر جديد',
'Create Item Catalog': 'اضافه تصنيف جديد',
'Create Job Title': 'اضافة عنوان العمل',
'Create Kit': 'إضافة أدوات جديدة',
'Create Map Profile': 'اضافة تكوين خريطة',
'Create Marker': 'أضف ماركر',
'Create Mobile Impact Assessment': 'ايجاد تقييم للتأثير المتنقل',
'Create National Society': 'إنشاء جمعية وطنية',
'Create Office': 'أضف مكتبا',
'Create Organization': 'أضف منظمة',
'Create Partner Organization': 'انشاء منظمة شريكه',
'Create Program': 'اضافة برنامج',
'Create Project': 'اضافه مشروع',
'Create Report': 'اضافة تقرير جديد',
'Create Request': 'إنشاء طلب',
'Create Resource': 'أضف موردا',
'Create River': 'أضف نهرا',
'Create Room': 'إضافة غرفة',
'Create Sector': 'أضف قطاعا',
'Create Shelter': 'أضف مأوى',
'Create Shelter Service': 'أضف خدمة مأوى',
'Create Skill': 'إضافة كفاءة',
'Create Staff Member': 'اضافه موظف',
'Create Supplier': 'اضافه مجهز',
'Create Training Event': 'اضافة نشاط تدريبي',
'Create User': 'إضافة مستخدم',
'Create Volunteer': 'أضف متطوعا',
'Create Volunteer Role': 'إنشاء دور المتطوعين',
'Create Warehouse': 'أضف مستودعا',
'Credential deleted': 'تم حذف الاعتماد',
'Credential Details': 'تفاصيل الاعتماد',
'Credential updated': 'تم تحديث الاعتماد',
'Crime': 'جريمة',
'Criteria': 'معيار',
'Currency': 'عملة',
'current': 'حساب',
'Current community priorities': 'أولويات المجتمع السائد',
'Current greatest needs of vulnerable groups': 'أكبرالاحتياجات الحالية للفئات الضعيفة',
'Current Group Members': 'أعضاء الفريق الحالي',
'Current health problems': 'المشاكل الصحية الحالية',
'Current Home Address': 'عنوان السكن الحالي',
'Current Log Entries': 'إدخالات السجل الحالي',
'Current problems, details': 'المشاكل الحالية، تفاصيل',
'Current response': 'إستجابة حالية',
'Current Team Members': 'أعضاء الفريق الحالي',
'Current Twitter account': 'حساب twitter الحالي',
'Current Weather': 'الطقس لحالي',
'Currently no Certifications registered': 'لا توجد شهادات مسجلة حاليا',
'Currently no entries in the catalog': 'حاليا لا مقال في الكاتالوج',
'CV': 'السيرة الذاتية',
'dark': 'داكن',
'Dashboard': 'لوحة التحكم',
'data uploaded': 'تم تحميل البيانات',
'database %s select': 'اختر قاعدة البيانات بـ%s',
'Date': 'التاريخ',
'Date & Time': 'التاريخ والوقت',
'Date and time this report relates to.': 'تاريخ و وقت هذا التقرير يتعلق بـ.',
'Date Needed By': 'تاريخ الوصول',
'Date of Birth': 'تاريخ الميلاد',
'Date of Latest Information on Beneficiaries Reached': 'لقد وصل تاريخ آخر المعلومات عن المستفيدين',
'Date Printed': 'تاريخ الطبع',
'Date Question': 'تاريخ السؤال',
'Date Received': 'تاريخ استلامه',
'Date Repacked': 'Date Repacked',
'Date Requested': 'الموعد المطلوب',
'Date Sent': 'تاريخ المرسلة',
'Date/Time': 'التاريخ / الوقت',
'Dead Body Details': 'تفاصيل الجسم الميت',
'Dead Body Reports': 'تقارير الجثث الميتة',
'deceased': 'متوفي',
'Decimal Degrees': 'الدرجات العشرية',
'Decomposed': 'متحللة',
'Default': 'اساسي',
'Default Width of the map window.': 'العرض الافتراضي لإطار الخريطة.',
'Defines the icon used for display of features on interactive map & KML exports.': 'يعرف الرمز المستخدم لعرض ملامح من الخريطة التفاعلية وصادرات KML.',
'Defines the marker used for display & the attributes visible in the popup.': 'يحدد العلامة المستخدمة للعرض والسمات الظاهرة للتوضيح',
'Delete': 'مسح',
'Delete Assessment': 'حذف التقييم',
'Delete Assessment Summary': 'حذف ملخص التقييم',
'Delete Baseline': 'حذف الخط القاعدي',
'Delete Commitment': 'حذف الالتزام',
'Delete Contact Information': 'حذف معلومات الشخص المراد الاتصال به',
'Delete Course': 'حذف الدورة',
'Delete Credential': 'حذف الاعتمادات',
'Delete Donor': 'حذف مانح',
'Delete Entry': 'إلغاء الدخول',
'Delete Event': 'حذف الحدث',
'Delete Feature Layer': 'حذف خاصية الطبقة',
'Delete Kit': 'حذف طقم أدوات',
'Delete Layer': 'حذف طبقة',
'Delete Level 2 Assessment': 'حذف تقييم المستوى 2',
'Delete Location': 'حذف الموقع',
'Delete Map Profile': 'حذف تكوين خريطة',
'Delete Message': 'حذف الرسالة',
'Delete Mission': 'حذف البعثة',
'Delete Need': 'حذف الحاجة',
'Delete Need Type': 'حذف نوع الحاجة',
'Delete Organization': 'إلغاء المنظمة',
'Delete Photo': 'حذف الصورة',
'Delete Population Statistic': 'حذف إحصاء السكان',
'Delete Projection': 'حذف التخطيط',
'Delete Rapid Assessment': 'حذف التقييم السريع',
'Delete Received Item': 'تم حذف العنصر المستَقبل',
'Delete Received Shipment': 'حذف الشحنة المتلقاة',
'Delete Request': 'حذف الطلب',
'Delete Request Item': 'حذف عنصرالطلب',
'Delete Room': 'حذف غرفة',
'Delete Scenario': 'حذف السيناريو',
'Delete Sector': 'حذف قطاع',
'Delete Setting': 'حذف إعداد',
'Delete Skill': 'حذف مهارة',
'Delete Skill Equivalence': 'إلغاء تكافؤ الكفاءة',
'Delete Status': 'حذف حالة',
'Delete Survey Series': 'حذف سلاسل المسح',
'Delete Training Event': 'مسح عمل التدريب',
'Delete Unit': 'تم حذف الوحدة',
'Delete Warehouse': 'حذف المستودع',
'deleted': 'محذوف',
'Delphi Decision Maker': 'دلفي ديسيجن مايكر',
'Demographics': 'التوزيع السكاني',
'Demonstrations': 'مظاهرات',
'Department / Unit': 'القسم / الوحدة',
'Department Catalog': 'لائحة الاقسام',
'Describe the condition of the roads to your hospital.': 'وصف حالة الطرق الى المستشفى الخاص بك.',
'Description': 'الوصف',
'Description of defecation area': 'وصف منطقة التغوط',
'Description of drinking water source': 'وصف مصدر مياه الشرب',
'Design, deploy & analyze surveys.': 'تصميم',
'Destination': 'الوجهة',
'Details': 'التفاصيل',
'Dialysis': 'غسيل الكلى',
'Diarrhea': 'إسهال',
'Direct Date': 'مباشر التسجيل',
'Direct Number': 'رقم المباشرة',
'Disable': 'تعطيل',
'Disabled': 'معطل',
'Disabled participating in coping activities': 'تعطيل المشاركة في أفضل النشاطات',
'Disabled?': 'معاق؟',
'Disaster Assessments': 'تقييمات الكوارث',
'Disaster clean-up/repairs': 'الإصلاحات/التنظيف بعد الكارثة',
'Discussion Forum': 'منتدى الحوار',
'Discussion Forum on item': 'نقاش المنتدى حول الموضوع',
'Disease vectors': 'ناقلات الأمراض',
'diseased': 'متوفي',
'Dispensary': 'مستوصف',
'Displaced': 'النازحون',
'Display Waypoints?': 'عرض نقاط الطريق؟',
'Distance between defecation area and water source': 'المسافة بين منطقة التغوط ومصدر المياه',
'Distribution': 'التوزيع',
'Distribution groups': 'مجموعات التوزيع',
'District': 'مقاطعة',
'divorced': 'مطلق',
'DNA Profile': 'بيانات الحمض النووي',
'DNA Profiling': 'تحليل DNA',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'هل تريد إلغاء هذه الشحنة المستلمة؟ ستتم إزالة العناصر من المخزون.لا يمكن التراجع عن هذا الاجراء !',
'Do you want to send this shipment?': 'هل تريد إرسال هذه الشحنة؟',
'Document added': 'تمت اضافة الوثيقة',
'Document deleted': 'تم حذف الوثيقة',
'Document Details': 'تفاصيل الوثيقة',
'Documents': 'وثائق',
'Documents and Photos': 'وثائق وصور',
'Dollars': 'دولار',
'Domestic chores': 'الأعمال المنزلية',
'Donation Phone #': 'رقم هاتف التبرعات',
'Donor': 'المانح',
'Donor added': 'تمت اضافة المانح',
'Donor deleted': 'تم حذف المانح',
'Donors': 'الجهات المانحة',
'Donors Report': 'تقريرعن الجهات المانحة',
'Download Template': 'تنزيل نموذج',
'Draft': 'مسودة مشروع',
'Drill Down by Incident': 'الوصول إلى البيانات على نحو متزايد عن طريق الحوادث',
'Driver': 'السائق',
'Driver Phone Number': 'رقم هاتف السائق',
'Driving License': 'رخصة السياقة',
'Drugs': 'أدوية',
'Dug Well': 'بئرمحفور',
'duplicate': 'انسخ',
'Duplicate?': 'مكرر؟',
'Duration': 'المدة',
'Dust Storm': 'عاصفة ترابية',
'DVI Navigator': 'متصفح DVI',
'Early Recovery': 'الإنعاش المبكر',
'Earthquakes: Recent Events': 'هزات ارضية',
'Edit': 'تعديل',
'Edit Address': 'تحرير عنوان',
'Edit Application': 'تحريرتطبيق Edit Application',
'Edit Asset Log Entry': 'تعديل سجل الدخول للإمتياز',
'Edit Baseline': 'تحريرخط قاعدي',
'Edit Baseline Type': 'تحرير نوع الخط القاعدي',
'Edit Brand': 'تحرير العلامة التجارية',
'Edit Bundle': 'تحرير حزمة',
'Edit Certification': 'تحرير شهادة',
'Edit Commitment': 'تحرير التزام',
'Edit Competency': 'تحرير الكفاءة',
'Edit Contact': 'تحرير الاتصال',
'Edit Contact Information': 'تعديل معلومات الشخص المتّصَل به',
'Edit Course': 'تصحيح الدورة',
'Edit Credential': 'تحريراعتماد',
'Edit Details': 'تحرير التفاصيل',
'Edit Disaster Victims': 'تحرير ضحايا الكارثة',
'Edit Facility': 'تحرير مرفق',
'Edit Feature Class': 'تعديل خاصية الطبقة',
'Edit Hospital': 'تحرير مستشفى',
'Edit Human Resource': 'تحرير الموارد البشرية',
'Edit Identification Report': 'تحرير تقريرعن تحديد الهوية',
'Edit Impact': 'تحرير تأثير',
'Edit Impact Type': 'تعديل نوع الأثر',
'Edit Incident Report': 'تحرير تقريرحوادث',
'Edit Item': 'تحرير عنصر',
'Edit Item Pack': 'تعديل عنصر التحزيم',
'Edit Key': 'تحرير مفتاح',
'Edit Kit': 'تحرير كيت',
'Edit Level 1 Assessment': 'تحريرلتقييم من مستوى 1',
'Edit Level 3 Locations?': 'تحريرمواقع المستوى 3؟',
'Edit Location': 'تحرير موقع',
'Edit Map Profile': 'تحرير تكوين الخريطة',
'Edit Map Services': 'تحرير خدمات الخريطة',
'Edit Marker': 'تحرير ماركر (علامة)',
'Edit Message': 'تحرير رسالة',
'Edit Messaging Settings': 'تعديل نظام بعث الرسائل',
'Edit Mission': 'تعديل المهمة',
'Edit Office': 'مكتب التحرير',
'Edit Parameters': 'تحرير المعاملات',
'Edit Personal Effects Details': 'تحريرتفاصيل التأثيرات الشخصية',
'Edit Population Statistic': 'تصحيح إحصاء السكان',
'Edit Position': 'تعديل الوضع',
'Edit Problem': 'تحرير مشكلة',
'Edit Rapid Assessment': 'تحرير تقييم سريع',
'Edit Received Shipment': 'تحرير الشحنة المتلقاة',
'Edit Request': 'تحرير طلب',
'Edit Resource': 'تحرير الموارد',
'Edit River': 'تحرير نهر',
'Edit Room': 'تحرير غرفة',
'Edit Scenario': 'تحرير السيناريو',
'Edit Setting': 'تحرير إعداد',
'Edit Shelter': 'تحرير مأوى',
'Edit Shelter Service': 'تحرير خدمات المأوى',
'Edit Skill': 'تعديل المؤهل',
'Edit Team': 'فريق التحرير',
'Edit Theme': 'تحرير الموضوع',
'Edit Ticket': 'تحرير تذكرة',
'Edit Training': 'تحرير تدريب',
'Edit Training Event': 'تعديل عمل التدريب',
'Edit Warehouse': 'تحرير مستودع',
'Education': 'مستوى التعليم',
'Education materials received': 'مواد التعليم المتلقاة',
'Effects Inventory': 'جرد التأثيرات',
'Either file upload or image URL required.': 'إيداع ملف أو رابط الصورة مطلوب.',
'Elevated': 'مرتفع',
'Elevators': 'المصاعد',
'Email': 'البريد الإلكتروني',
'Email Address': 'البريد الالكتروني',
'Embalming': 'تحنيط',
'Embassy': 'سفارة',
'Emergency Contacts': 'وسائل الاتصال الطارئة',
'Emergency Department': 'قسم الطوارئ',
'Emergency Shelter': 'مأوى للطواريء',
'Emergency Support Facility': 'مرفق الطواريء',
'Emergency Support Service': 'خدمة الدعم في حالات الطواريء',
'Empty': 'فارغ',
'Enable/Disable Layers': 'تمكين/تعطيل الطبقات',
'End date': 'تاريخ النهاية',
'End Date': 'نهاية التاريخ',
'End of Period': 'نهاية حقبة',
'English': 'الإنجليزية',
'Enter Coordinates:': 'أدخل الإحداثيات:',
'Enter the same password as above': 'أدخل كلمة المرور نفسها على النحو الوارد أعلاه',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'إدخال رقم الهاتف هو اختياري، ولكن القيام بذلك يسمح لك الاشتراك لاستقبال الرسائل القصيرة.',
'Entry deleted': 'حذف دخول',
'Environment': 'البيئة',
'ER Status': 'حالة الطوارئ',
'Error in message': 'خطأ في الرسالة',
'Estimated # of households who are affected by the emergency': 'العدد المقدر للأسر التي تأثرت بحالة الطوارئ',
'Estimated Delivery Date': 'تاريخ الوصول المتوقع',
'Estimated total number of people in institutions': 'المجموع المقدر لعدد الأشخاص في المؤسسات',
'Euros': 'يورو',
'Event added': 'تمت اضافة الحدث',
'Event deleted': 'تم حذف الحدث',
'Event Types': 'نوع النشاط',
'Event updated': 'تم تحديث الحدث',
'Events': 'الوقائع',
'Exceeded': 'تم تجاوزه (ا)',
'Execute a pre-planned activity identified in <instruction>': 'تنفيذ نشاط مخطط مسبقا معرف في <البنية<instruction>',
'Exercise?': 'تمرين؟',
'Experience': 'الخبرة',
'Expiration Date': 'تاريخ الانتهاء',
'Expiration Report': 'تقرير الختامي',
'Expiring Staff Contracts Report': 'تقرير أنهاء عقود الموظفين',
'Expiry Date': 'تاريخ انتهاء الصلاحية',
'Expiry (months)': 'شهر الانتهاء',
'Export as': 'تصدير',
'export as csv file': 'تصدير كملف csv',
'Export Database as CSV': 'تصدير قاعدة البيانات ك CSV',
'Export in GPX format': 'التصدير في شكل GPX',
'Export in OSM format': 'التصدير في شكل OSM',
'Export in RSS format': 'تصدير في RSS',
'Exterior and Interior': 'خارجي وداخلي',
'Exterior Only': 'الخارج فقط',
'Eye Color': 'لون العينين',
'Facebook': 'فيسبوك',
'Facial hair, type': 'شعر الوجه ، النوع',
'Facilities': 'منشأة',
'Facility': 'مرفق',
'Facility Details': 'تفاصيل المرفق',
'Facility Operations': 'عمليات التسهيل',
'Facility Status': 'حالة المرفق',
'Facility Types': 'أنواع المنشأت',
'Falling Object Hazard': 'خطر سقوط المشاريع',
'Family/friends': 'عائلة / أصدقاء',
'Fax': 'فاكس',
'Feature Class': 'ميزة الفئة',
'Feature Class deleted': 'حُذفت خصائص الفئة',
'Female': 'أنثى',
'female': 'أنثى',
'Female headed households': 'الأسر التي ترأسها الإناث.',
'Few': 'قليل',
'File': 'ملف',
'Fill in Longitude': 'ملء خط الطول',
'Filter Options': 'خيارات الترشيح',
'Finance Officer': 'موضف المالية',
'Find': 'البحث',
'Find Dead Body Report': 'البحث عن تقريرالمتوفين',
'Find Hospital': 'البحث عن مستشفى',
'Find Volunteers': 'البحث عن متطوعين',
'Fingerprinting': 'البصمات',
'Fingerprints': 'بصمات الأصابع',
'Fire': 'حريق',
'First': 'اولاً',
'First Name': 'الاسم',
'Flash Flood': 'طوفان مفاجئ',
'Flash Freeze': 'فلاش تجميد',
'Fleet Manager': 'مدير الموكب',
'Flood': 'فيضان',
'Flood Alerts': 'إنذار عن حدوث فيضان',
'Flood Report': 'تقرير الفيضانات',
'Flood Report added': 'تمت اضافة تقرير الفيضانات',
'Flood Report Details': 'تفاصيل تقرير الفيضانات',
'Flood Reports': 'تقاريرالفيضانات',
'Food Supply': 'الإمدادات الغذائية',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'لكل شريك في المزامنة، هناك وظيفة مزامنة مفترضة يتم تشغيلها بعد فترة محددة من الوقت. يمكنك ايضا انشاء المزيد من الوظائف التي يمكن تخصيصها حسب احتياجاتك. اضغط على الرابط الموجود عن اليمين للبدء.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'لتعزيز الأمن، ننصحك بإدخال اسم المستخدم وكلمة السر ، وإبلاغ مدراء الآلات الأخرى في منظمتك بإضافة هذا الإسم وكلمة السر ضد UUID في التزامن --> مزامنة الشركاء',
'For live help from the Sahana community on using this application, go to': 'للحصول على مساعدة مباشرة من مجتمع ساهانا (Sahana )باستخدام هذا التطبيق ،انتقل إلى',
'For more information, see ': 'لمزيد من المعلومات، انظر',
'forehead': 'جبين',
'Formal camp': 'مخيم رسمي',
'Format': 'هيئة',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": 'تشكيل قائمة القيم المرمزة وقيمة RGB لاستخدامها كغاية لـ JSON ، على سبيل المثال : {الأحمر : \'# FF0000 ، الأخضر :\' # 00FF00 "، الأصفر:\' # FFFF00 \'}',
'Forms': 'أشكال',
'found': 'وُجد',
'Foundations': 'مؤسسات',
'French': 'الفرنسية',
'Friday': 'الجمعة',
'From Inventory': 'من المخزون',
'Funding': 'تمويل',
'Fulfil. Status': 'حالة الانجاز',
'Full': 'تام',
'Full beard': 'لحية كاملة',
'Funding Report': 'التقرير المالي',
'Further Action Recommended': 'ينصح بالمزيد من العمل',
'Gale Wind': 'ريح عاصفة',
'Gateway settings updated': 'تم تحديث إعدادات البوابة',
'General Medical/Surgical': 'الطب العام/الجراحي',
'Geocode': 'الترميز الجغرافي',
'Geotechnical Hazards': 'المخاطر الجيوتقنية',
'GIS integration to view location details of the Shelter': 'GIS لعرض تفاصيل موقع المأوى',
'GIS Reports of Shelter': 'تقارير نظام المعلومات الجغرافية للإيواء',
'Global Messaging Settings': 'ضبط الرسائل العامة',
'Go': 'اذهب',
'Goods Received Note': 'ملاحظة عن السلع الواردة',
'Google Maps': 'خرائط كوكل',
'Google Satellite': 'كوكل ستلايت',
'Government building': 'مبنى حكومي',
'Government UID': 'رمز المستخدم للحكومة',
'GPS': 'نظام تحديد المواقع GPS',
'GPS Marker': 'ماركر نظام تحديد المواقع (GPS)',
'GPX Track': 'مسار GPX',
'Grade': 'التخرج',
'Greek': 'يونانية',
'green': 'أخضر',
'Green': 'أخضر',
'Group': 'فوج',
'Group added': 'تمت اضافة الفوج',
'Group Members': 'أعضاء المجموعة',
'Group Name': 'اسم الفوج',
'Group Title': 'عنوان الفوج',
'Group Type': 'نوع الفوج',
'Groups': 'الأفواج',
'Groups removed': 'تمت إزالة الأفواج',
'Hair Style': 'أسلوب الشعر',
'Has additional rights to modify records relating to this Organization or Site.': 'لديه حقوق إضافية لتعديل السجلات المتعلقة بهذه المنظمة أو الموقع.',
'Has the Certificate for receipt of the shipment been given to the sender?': 'هل لديه شهادة مسلمة للمرسِل لاستلام الشحنة؟',
'Has the GRN (Goods Received Note) been completed?': 'هل تم الانتهاء من تسجيل السلع المستلمة ؟',
'Hazard': 'مخاطر',
'Hazards': 'المخاطر',
'Health': 'الصحة',
'Health care assistance, Rank': 'دعم الرعاية الصحية،نظام',
'Health center': 'مركز صحي',
'Health center without beds': 'مركز صحي بدون أسرة',
'Heat Wave': 'موجة حر شديد',
'Height': 'الإرتفاع',
'Height (cm)': 'القامة (سم)',
'Height (m)': 'الإرتفاع (م)',
'Help': 'مساعدة',
'here': 'هنا',
'Hierarchy Level 1 Name (e.g. State or Province)': 'إسم المستوى 1 من التسلسل الهرمي (مثال : ناحية أو ولاية)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'اسم التسلسل الهرمي للمستوى 3 ا(مثل المدينة / البلدة / القرية)',
'Highest Priority Open Requests': 'فتح طلبات مهم للغاية',
'Hit the back button on your browser to try again.': 'اضغط زر الرجوع في المتصفح الخاص بك لإعادة المحاولة.',
'Home': 'الصفحة الرئيسية',
'Home Address': 'عنوان المنزل',
'Home Crime': 'جريمة عائلية',
'Hospital': 'المستشفى',
'Hospital Details': 'تفاصيل المستشفى',
'Hospital information added': 'تمت اضافة المعلومات الخاصة بالمستشفى',
'Hospitals': 'مستشفيات',
'Host National Society': 'الفرع المضيف',
'Hot Spot': 'هوت سبوت',
'hourly': 'كل ساعة',
'Hours': 'الساعات',
'Household kits received': 'أطقم المعدات المنزلية الواردة',
'Households below poverty line': 'الاسر تحت خط الفقر',
'How is this person affected by the disaster? (Select all that apply)': 'كيف تضرر هذا الشخص من جراء الكارثة؟ (اختر كل ما ينطبق عليه)',
'How long will the food last?': 'كم سيدوم الغذاء؟',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'كم عدد الفتيان (0-17 عاما) المفقودين بسبب الأزمة',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'كم عدد الفتيات (0-17 سنوات) اللائي توفين بسبب الأزمة',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'كم عدد الفتيات (0-17 عاما)اللواتي أُصبن بسبب الأزمة',
'How many Men (18 yrs+) are Injured due to the crisis': 'كم عدد الرجال المصابين(+18 عاما ) بسبب الأزمة',
'How many Men (18 yrs+) are Missing due to the crisis': 'كم عدد الرجال ( + 18 عاما) مفقود بسبب الأزمة',
'How many new cases have been admitted to this facility in the past 24h?': 'كم عدد الحالات الجديدة التي تم نقلها لهذا المرفق في 24ساعة الماضية؟',
'How many Women (18 yrs+) are Missing due to the crisis': 'كم عدد النساء (+18عاما) المفقودات بسبب الأزمة',
'Human Resource Management': 'إدارة الموارد البشرية',
'Human Resources': 'الموارد البشرية',
'Hurricane': 'اعصار',
'Hygiene': 'النظافة',
'Hygiene NFIs': 'مواد النظافة',
'Hygiene practice': 'ممارسة النظافة',
'I am available in the following area(s)': 'انا متوفر في المجالات التالية',
'ID Tag Number': 'رقم البطاقة التعريفية',
'Identity Details': 'تفاصيل الهوية',
'Identity updated': 'تم تحديث الهوية',
'IFRC DM Resources': 'موارد الاتحاد الدولي',
'If it is a URL leading to HTML, then this will downloaded.': 'إذا كان هذا رابطا يؤدي إلى HTML اذن سيتم التحميل.',
'If neither are defined, then the Default Marker is used.': 'إذا لم يتم تحديد أي أحد ،ستستخدم علامة افتراضية.',
'If no marker defined then the system default marker is used': 'إذا لم يكن هناك علامة محددة سوف يتم استخدام العلامة الافتراضية للنظام',
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'اذا تم الاختيار,فسيتم تحديث موقع الضبط حيثما وجد موقع الشخص.',
'If the request type is "Other", please enter request details here.': 'إذا كان نوع الطلب "أخر"، إذا سمحت أدخل تفاصيل الطلب هنا.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'إذا كان هذا التكوين يمثل منطقة لقائمة المناطق ، أعطه اسما لاستخدامه في القائمة. سيكون اسم التكوين الشخصي للخريطة موافقا لاسم المستخدم.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'إذا تم وضع العلامة، سيصبح هذا موقع قاعدة المستخدم حيث ظهرعلى الخريطة',
'If this record should be restricted then select which role is required to access the record here.': 'إذا إستلزم تقييد هذا التسجيل فاختر الدور المناسب للدخول في التسجيل هنا.',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'إذا لم تدخل وثيقة مرجعية ، سيتم عرض البريد الإلكتروني الخاص بك للتحقق من هذه البيانات.',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": 'إذا لم تر المستشفى في القائمة، يمكنك إضافته بالضغط على "أضف مستشفى\'.',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "إذا كنت لا ترى المكتب في القائمة ، يمكنك إضافة واحدة جديدة بالنقر على رابط 'إضافة مكتب'.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "إذا كنت لا ترى المنظمة في القائمة ، يمكنك إضافة واحدة جديدة بالنقر على رابط 'أضف منظمة'.",
'Image': 'صورة',
'Image deleted': 'تم حذف الصورة',
'Image Details': 'تفاصيل الصورة',
'Image Tags': 'شعار الصور',
'Image Type': 'نوع الصورة',
'Image updated': 'تم تحديث الصورة',
'Impact added': 'تمت اضافة الأثر',
'Impact Assessments': 'أثرالتقييمات',
'Impact Type updated': 'تم تحديث نوع التأثير',
'Impact Types': 'تأثير الأنواع',
'Impact updated': 'تم تحديث الأثر',
'Impacts': 'آثار',
'Import': 'استيراد',
'Import & Export Data': 'استيراد وتصدير البيانات',
'Import Catalog Items': 'استيراد فهرس البنود',
'Import Community Data': 'استيراد بيانات المجتمع',
'Import Data': 'استيراد البيانات',
'Import File': 'ادخال مستند',
'Import Hours': 'ادخال عدد الساعات',
'Import multiple tables as CSV': 'استيراد جداول متعددة ك CSV',
'Import Participant List': 'استيراد لائحة المشاركين',
'Import Projects': 'استيراد مشاريع',
'Import Project Communities': 'استيراد مجتمعات المشروع',
'Import Project Organizations': 'استيراد منظمات المشروع',
'Import Staff': 'استيراد موظفين',
'Import Suppliers': 'استيراد المجهزين',
'Import Training Participants': 'استيراد المتدربين',
'Import Warehouses': 'ادخال المخزونات',
'Import Warehouse Stock': 'ادخال سند المخزونات',
'Important': 'مهم',
'Importing data from spreadsheets': 'استيراد بيانات من جداول البيانات',
'Improper decontamination': 'تطهير غير مناسب',
'in Deg Min Sec format': 'في مقاس درجة دقيقة ثانية',
'in GPS format': 'في شكل GPS',
'In Inventories': 'في قوائم الجرد',
'In Process': 'في اطار الانجاز',
'In Progress': 'في تقدم',
'in Stock': 'في المخزون',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'الخريطة ستكبر ملء النافذة ، لذلك لا حاجة لتعيين قيمة كبيرة هنا.',
'Incident Report added': 'أُضيف تقرير الحادث',
'Incident Report deleted': 'تم حذف تقرير الحادث',
'Incident Report updated': 'تم تحديث تقريرالحادث',
'Incident Reports': 'تقارير الكوارث',
'Incident Type': 'نوع الحادث',
'Incident Types': 'انواع الحوادث',
'Incidents': 'الحوادث',
'Incoming Shipment canceled': 'تم إلغاء الشحن الواردة',
'Industrial': 'صناعي',
'Industrial Crime': 'جريمة صناعية',
'Industry Fire': 'حريق صناعي',
'Infant (0-1)': 'الرضع (0-1)',
'Infectious Disease': 'مرض معد',
'Infectious Diseases': 'الأمراض المعدية',
'Information gaps': 'الفجوات في المعلومات',
'Infusions needed per 24h': 'الحقن اللازمة لكل 24 ساعة',
'insert new': 'إدراج جديد',
'Inspected': 'تم تفقده',
'Inspection Date': 'تاريخ التفتيش',
'Inspection date and time': 'تاريخ ووقت التفقد',
'Inspector ID': 'هوية المفتش',
'Institution': 'مؤسسة',
'Instructor': 'المدرب',
'Insufficient Privileges': 'لا توجد صلاحية للدخول',
'Interview taking place at': 'تجرى المقابلة في',
'Invalid': 'غير صحيح',
'Inventories': 'قوائم الجرد',
'Inventory': 'المخازن',
'Inventory Item Details': 'تفاصيل جرد السلع',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'من المتوقع أن تتألف من عدة وحدات فرعية التي تعمل معا لتوفير وظائف معقدة لإدارة مواد الإغاثة والمشاريع من قبل المنظمة. هذا يشمل نظام المدخول ، نظام إدارة المستودعات ، تتبع السلع الأساسية ، إدارة سلسلة التوريد ، إدارة الأسطول ، المشتريات ، تتبع المالية والأصول الأخرى وكفاءات إدارة الموارد',
'Is it safe to collect water?': 'هل جمع المياه آمن؟',
'Item': 'عنصر',
'Item/Description': 'الماده / الوصف',
'Item Added to Shipment': 'أُضيف عنصر للشحنة',
'Item already in budget!': 'عنصر قد ورد في الميزانية!',
'Item already in Bundle!': 'عنصرموجود في الحزمة سابقا!',
'Item already in Kit!': 'العنصر موجود في الطقم!',
'Item Catalog Details': 'تفاصيل موضوع الكاتالوج',
'Item Categories': 'لوائح المفردات',
'Item Category Details': 'تفاصيل نوع العنصر',
'Item Pack deleted': 'تم حذف عنصر المجموعة',
'Item Pack Details': 'تفاصيل الحزمة',
'Item Pack updated': 'تحديث حزم المواد',
'Item removed from Inventory': 'تم إزالة العنصر من الجرد',
'Item Tracking Status': 'عناصر البحث',
'Items': 'العناصر',
'Japanese': 'اليابانية',
'Jew': 'يهودي',
'Job Role added': 'أُضيف الدور الوظيفي',
'Job Role Catalog': 'دليل الدور الوظيفي',
'Job Role deleted': 'تم حذف الدور الوظيفي',
'Job Title': 'عنوان العمل',
'Job Title Catalog': 'لائحة العنوان الوظيفي',
'Job Titles': 'مسميات الوظيفة',
'Jobs': 'المهن',
'Journal': 'يومية',
'Journal entry added': 'تمت اضافة مدخل اليومية',
'Journal entry updated': 'تم تحديث مدخل اليومية',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'يتتبع جميع التذاكر الواردة ويسمح بتصنيفها وتوجيهها إلى المكان المناسب لتفعيلها.',
'Kit': 'معدات',
'Kit added': 'تمت اضافة الطقم',
'Kit Contents': 'محتويات الطقم',
'Kit updated': 'تم تحديث الطقم',
'Lack of material': 'نقص المواد',
'Lack of transport to school': 'نقص وسائل النقل إلى المدرسة',
'Lahar': 'سيل بركاني',
'Landslide': 'انزلاق تربة',
'Language': 'اللغة',
'Last Name': 'اللقب',
'Last updated on': 'آخر تحديث',
'Latitude & Longitude': 'خط العرض وخط الطول',
'Latitude is North-South (Up-Down).': 'خط العرض شمال-جنوب (من الأعلى الى الأسفل).',
'Latitude of far northern end of the region of interest.': 'خط عرض نهاية أقصى شمال المنطقة المعنية.',
'Latitude of far southern end of the region of interest.': 'خط عرض نهاية اقصى جنوب المنطقة المعنية.',
'Latitude should be between': 'خط العرض ينبغي أن يكون بين',
'Layer deleted': 'تم حذف الطبقة',
'Layers': 'الطبقات',
'Lead Facilitator': 'قيادة المنشأت',
'Leader': 'قائد',
'Legend Format': 'شكل العنوان',
'legend URL': 'عنوان الرابط',
'Length (m)': 'الطول(م)',
'Level': 'المستوى',
'Level 1': 'المستوى 1',
'Level 1 Assessment deleted': 'حذف تقييم مستوى1',
'Level 2': 'المستوى 2',
'Level 2 Assessment added': 'تمت اضافة تقويمات المستوى 2',
'Level 2 Assessment updated': 'تم تحديث التقييم في المستوى 2',
'Level 2 Assessments': 'المستوى 2 للتقييمات',
'Level of Award': 'مستوى التقدم',
'LICENSE': 'الرخصة',
'Link to this result': 'رابط إلى هذه النتيجة',
'List': 'قائمة',
'List / Add Baseline Types': 'قائمة / إضافة أنواع الخطوط القاعدية',
'List All': 'كل القائمة',
'List Assets': 'قائمة الإمتيازات',
'List available Scenarios': 'قائمة السيناريوهات المتاحة',
'List Catalog Items': 'قائمة عناصر الكاتالوج',
'List Certifications': 'قائمة الشهادات',
'List Conflicts': 'قائمة المضاربات',
'List Courses': 'قائمة الدورات',
'List Credentials': 'لائحة أوراق الاعتماد',
'List Current': 'القائمة الحالية',
'List Feature Layers': 'قائمة خصائص الطبقات',
'List Flood Reports': 'قائمة تقاريرالفيضانات',
'List Groups': 'قائمة المجموعات',
'List Groups/View Members': 'قائمة المجموعات / عرض الأعضاء',
'List Human Resources': 'قائمة الموارد البشرية',
'List Identities': 'قائمة الهويات',
'List Impact Assessments': 'قائمة تقييمات الأثر',
'List Impact Types': 'قائمة أنواع التأثير',
'List Impacts': 'قائمة التاثيرات',
'List Items in Inventory': 'قائمة العناصر في الجرد',
'List Kits': 'قائمة الأطقم',
'List Level 1 Assessments': 'قائمة التقييمات للمستوى1',
'List Level 1 assessments': 'قائمة تقييمات المستوى 1',
'List Level 2 Assessments': 'قائمة التقييمات مستوى 2',
'List Level 2 assessments': 'قائمة التقييمات للمستوى 2',
'List Log Entries': 'قائمة سجل الإدخالات',
'List Map Profiles': 'قائمة خريطة التعديلات',
'List Members': 'قائمة الأعضاء',
'List Messages': 'قائمة الرسائل',
'List Need Types': 'قائمة أنواع الاحتياجات',
'List of addresses': 'قائمة عناوين',
'List of Items': 'قائمة العناصر',
'List of Missing Persons': 'قائمة الأشخاص المفقودين',
'List of Volunteers for this skill set': 'قائمة المتطوعين لهذه المهارة',
'List Offices': 'قائمة المكاتب',
'List Organizations': 'قائمة المنظمات',
'List Personal Effects': 'قائمة التأثيرات الشخصية',
'List Population Statistics': 'قائمة إحصاء السكان',
'List Received Shipments': 'قائمة الشحنات التي استقبلت',
'List Records': 'قائمة السجلات',
'List Resources': 'قائمة الموارد',
'List Scenarios': 'قائمة السيناريوهات',
'List Sent Items': 'قائمة العناصر المرسلة',
'List Service Profiles': 'قائمة خدمة البيانات الشخصية',
'List Settings': 'إعدادات القائمة',
'List Shelter Types': 'قائمة أنواع المأوى',
'List Skill Equivalences': 'قائمة تكافؤ المهارات',
'List Status': 'قائمة الأوضاع',
'List Tickets': 'قائمة تذاكر',
'List Units': 'قائمة الوحدات',
'List Users': 'قائمة المستخدمين',
'List Warehouses': 'قائمة المستودعات',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'قوائم "من يفعل ماذا و أين". تسمح بتنسيق أعمال وكالات النجدة',
'Live Help': 'مساعدة حية',
'Livelihood Manager': 'مدير الاعاشة',
'Load Cleaned Data into Database': 'تحميل البيانات الكاملة إلى قاعدة البيانات',
'Local Name': 'الاسم المحلي',
'Location': 'موقع',
'Location 1': 'موقع 1',
'Location 2': 'الموقع 2',
'Location added': 'تمت اضافة الموقع',
'Location deleted': 'تم حذف الموقع',
'Location Detail': 'تفاصيل الموقع',
'Location group cannot be a parent.': ' لا يمكن أن يكون موقع المجموعة أصلي',
'Location Hierarchy Level 3 Name': 'اسم موقع التسلسل الهرمي للمستوى3',
'Location Hierarchy Level 4 Name': 'اسم مستوى4 للموقع على التسلسل الهرمي',
'Location Hierarchy Level 5 Name': 'اسم موقع التسلسل الهرمي للمستوى 5',
'Location updated': 'تم تحديث الموقع',
'Location: ': 'الموقع:',
'Locations': 'مواقع',
'Log entry updated': 'تم تحديث السجل',
'Logged in': 'تسجيل الدخول',
'Logged out': 'تسجيل خروج',
'Login': 'تسجيل الدخول',
'Logistics': 'اللوجستية',
'Logout': 'خروج',
'Longitude': 'خط الطول',
'Longitude is West-East (sideways).': 'يتمحورخط الطول من الغرب إلى الشرق (جانبي).',
'Longitude of far eastern end of the region of interest.': 'خط الطول لأبعد نهاية في الشرق الأقصى من المنطقة المهمة.',
'Longitude of Map Center': ' طول مركز خريطة',
'Long Name': 'الاسم الكامل',
'Lost': 'مفقود',
'Lost Password': 'فقدت كلمة السر',
'low': 'منخفض',
'Magnetic Storm': 'عاصفة مغناطيسية',
'Major': 'الاختصاص',
'male': 'ذكر',
'Managing material and human resources together to better prepare for future hazards and vulnerabilities.': 'ادارة الموارد البشرية والمادية للاعداد افضل في حاله المخاطر المستقبلية',
'Manage National Society Data': 'بيانات ادارة الجمعية الوطنية',
'Manage office inventories and assets.': 'ادارة المخازن والموجودات الثابتة.',
'Manage Offices Data': 'بيانات ادارة المكاتب',
'Manage Relief Item Catalogue': 'كتالوج إدارة عنصر الإغاثة',
'Manage requests of hospitals for assistance.': 'إدارة طلبات المستشفيات للحصول على المساعدة.',
'Manage Staff Data': 'بيانات ادارة الموضفين',
'Manage Teams Data': 'بيانات ادارة الفرق',
'Manage volunteers by capturing their skills, availability and allocation': 'إدارة المتطوعين من خلال التقاط مهاراتهم ، وتوافرهم وتوزيعهم',
'Manage Warehouses/Sites': 'إدارة المستودعات / المواقع',
'Manager': 'مدير',
'Managing Office': 'المكتب الاداري',
'Manual Synchronization': 'مزامنة يدوية',
'Many': 'عدة',
'Map': 'خريطة',
'Map Center Latitude': 'خط العرض لمركز الخريطة',
'Map Center Longitude': 'خط الطول المركزي للخريطة',
'Map Height': 'إرتفاع الخريطة',
'Map of Communities': 'خرائط المنظمات',
'Map of Hospitals': 'خريطة المستشفيات',
'Map Profile added': 'تمت اضافة تكوين الخريطة',
'Map Profile deleted': 'تم حذف تكوين الخريطة',
'Map Profiles': 'تكوينات الخريطة',
'Map Service Catalog': 'كتالوج خدمات الخريطة',
'Map Settings': 'اعدادات الخريطة',
'Map Width': 'عرض الخريطة',
'Map Zoom': 'تكبير الخريطة',
'Marital Status': 'الحالة الزوجية',
'Marker': 'علامة',
'Marker deleted': 'تم حذف العلامة',
'Marker Details': 'تفاصيل العلامة',
'married': 'متزوج',
'Match Requests': 'طلبات متشابهه',
'Matching Catalog Items': 'تطابق عناصر المصنف',
'Maximum Location Latitude': 'الموقع الأقصى لخط العرض',
'Maximum Location Longitude': 'أقصى خط طول للمكان',
'Medical Conditions': 'الحاله الصحية',
'medium': 'متوسط(ة)',
'Membership': 'عضوية',
'Membership Details': 'تفاصيل العضوية',
'Membership updated': 'تم تحديث العضوية',
'Message added': 'تمت اضافة الرسالة',
'Message Details': 'تفاصيل الرسالة',
'Message field is required!': 'حقل الرسالة مطلوب!',
'Messages': 'رسائل',
'Messaging settings updated': 'تم تحدبث ضبط الرسائل',
'Migrants or ethnic minorities': 'المهاجرون أو الأقليات العرقية',
'Military': 'عسكري',
'Minimum Location Longitude': 'خطوط الطول الأدنى للموقع',
'Minor Damage': 'أضرار طفيفة',
'Minorities participating in coping activities': 'الأقلية المشاركة في الأنشطة',
'Miscellaneous': 'متفرقات',
'Missing': 'مفقود',
'Missing Person': 'الشخص المفقود',
'Missing Person Details': 'تفاصيل الشخص المفقود',
'Missing Person Registry': 'سجل الشخص المفقود',
'Missing Report': 'تقرير مفقود',
'Missing Senior Citizen': 'كبار السن المفقودين',
'Mission Details': 'تفاصيل المهمة',
'Mission updated': 'تم تحديث المهمة',
'Mobile': 'المحمول',
'Mobile Phone': 'الهاتف المحمول',
'Mode': 'النمط',
'Model/Type': 'نوع/ نموذج',
'Modem Settings': 'ضبط الموديم',
'Modem settings updated': 'تم تحديث إعدادات المودم',
'Modifying data in spreadsheet before importing it to the database': 'تعديل البيانات في جدول قبل استيراده إلى قاعدة البيانات',
'module allows the site administrator to configure various options.': 'الوحدة التي تسمح لمسؤول عن الموقع لضبط مختلف الخيارات.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'توفر الوحدة آلية تعاونية و التي تزود بلمحة عامة عن الكوارث النامية ، وذلك باستخدام الخرائط مباشرة على شبكة الإنترنت (GIS).',
'Monthly Cost': 'التكلفة الشهرية',
'Monthly Salary': 'الراتب الشهري',
'Months': 'أشهر',
'more': 'المزيد',
'More Options': 'خيارات اخرى',
'Morgue Status': 'حالة مكان حفظ الجثث',
'Morgue Units Available': 'وحدات المشرحة المتوفرة',
'Mosque': 'مسجد',
'Multiple Matches': 'تعدد التطابقات',
'Muslim': 'مسلم',
'My Tasks': 'مهامي',
'N/A': 'غير موجود',
'Name': 'الاسم',
'Name of Award': 'أسم مستوى التقدم',
'Name of Driver': 'اسم السائق',
'Name of Father': 'اسم الاب',
'Name of Grandfather': 'اسم الجد',
'Name of Institute': 'أسم المعهد',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'يقع اسم الملف (و المسار الفرعي الإختياري ) في وجهات النظر التي يجب استخدامها أسفل الصفحة .',
'Names can be added in multiple languages': 'يمكن إضافة الأسماء في لغات متعددة',
'National': 'وطني',
'National Societies': 'المنظمات الوطنية',
'National Society': 'الجمعيه الوطنية',
'National Society / Branch': 'فروع / المنظمة الوطنية',
'Nationality': 'الجنسيه',
'Nationality of the person.': 'جنسية الشخص.',
'Need to be logged-in to be able to submit assessments': 'يجب أن يتم التسجيل للتمكن من تقديم تقييمات',
'Need to specify a group!': 'بحاجة إلى تحديد مجموعة!',
'Need to specify a Kit!': 'تحتاج إلى تخصيص طقم!',
'Need to specify a Resource!': 'يجب تحديد المصدر!',
'Need to specify a table!': 'تحتاج إلى تحديد جدول!',
'Need to specify a user!': 'تحتاج إلى تحديد المستخدم!',
'Need Type': 'نوع الحاجة',
'Need Type deleted': 'تم حذف نوع الحاجة',
'Need Type updated': 'تم تحديث نوع الحاجة',
'Needs Details': 'تحتاج إلى تفاصيل',
'Needs to reduce vulnerability to violence': 'يحتاج للحد من التعرض للعنف',
'negroid': 'زنجاني',
'Neonatology': 'حديثي الولادة',
'Network': 'شبكة',
'Neurology': 'طب الأعصاب',
'never': 'أبدا',
'new': 'جديد',
'New Assessment reported from': 'ذكر التقييم الجديد من',
'New Certificate': 'شهادة جديدة',
'New Checklist': 'قائمة جديدة',
'New Event': 'نشاط جديد',
'new record inserted': 'تم إدراج سجل جديد',
'New Skill': 'مهارة جديدة',
'New Team': 'فريق جديد',
'News': 'أخبار',
'No': 'كلا',
'No action recommended': 'لم يُوصى بأي عمل',
'No Activities Found': 'لم يتم العثور على نشاطات',
'No Assets currently registered in this event': 'لم تسجل حاليا أي مدخرات خلال هذاالحدث.',
'No Baselines currently registered': 'لا توجد خطوط قاعدية مسجلة حاليا',
'No Catalog Items currently registered': 'لا عناصرمسجلة حاليا',
'No Catalogs currently registered': 'لا مصنفات مسجلة حاليا',
'No Cluster Subsectors currently registered': 'لم تسجل حاليا أي كتلة للقطاعات الفرعية',
'No Commitment Items currently registered': 'لم تسجل حالياأي عناصر التزام',
'No Commitments': 'لا توجد أي التزامات',
'No contacts currently registered': 'ليس هناك أي إتصال مسجل',
'No Credentials currently set': 'لا إعتمادات مرسخة حاليا',
'No dead body reports available': 'لا توجد تقاريرعن الجثث الميتة',
'No Details currently registered': 'لم تسجل حاليا أي تفاصيل',
'No entries found': 'لم يتم العثور على إدخالات',
'No entries matching the query': 'لا توجد إدخالات مطابقة لطلب المعلومات',
'No Events currently registered': 'لا يوجد أحداث مسجلة حاليا',
'No Facilities currently registered in this scenario': 'لا توجد أي مرافق مسجلة حاليا في هذا السيناريو',
'No Feature Layers currently defined': 'لا توجد طبقات ميزة معرفة حاليا',
'No File Chosen': 'لم يتم اختيار مستند',
'No Groups currently defined': 'لا مجموعات محددة حاليا',
'No Hospitals currently registered': 'ليست هناك اي مستشفى مسجلة حاليا',
'No Human Resources currently registered in this event': 'لا توجد موارد بشرية مسجلة حاليا في هذا الحدث',
'No Images currently registered': 'لا توجد صور مسجلة في الوقت الراهن',
'No Impact Types currently registered': 'لا يوجد أي تسجيل لأنواع الأثرالآن',
'No Incident Reports currently registered': 'لم يسجل حاليا أي تقارير عن الحادث',
'No Incoming Shipments': 'لا توجد شحنات واردة',
'No location known for this person': 'لا يوجد موقع معروف خاص بهذا الشخص',
'No locations found for members of this team': 'لم يتم العثور على مواقع لأعضاء هذا الفريق',
'No Map Profiles currently defined': 'لا توجد تكوينات خريطة معرفة حاليا',
'No Map Profiles currently registered in this event': 'لا تكوينات خريطة مسجلة حاليا في هذا الحدث',
'No Markers currently available': 'لا علامات متاحة حاليا',
'No Match': 'ليس هناك تطابق',
'No Matching Records': 'لاتوجد أية تسجيلات مطابقة',
'No Members currently registered': 'لا يوجد أي أعضاء مسجلين حاليا',
'No Memberships currently defined': 'لا عضوية معرفة حاليا',
'No Messages currently in Outbox': 'لا توجد رسائل حاليا في البريد الصادر',
'No Needs currently registered': 'لا احتياجات مسجلة حاليا',
'No Persons currently registered': 'لا أشخاص مسجلين حاليا',
'No Persons currently reported missing': 'لا يوجد أشخاص في عداد المفقودين حاليا',
'No Persons found': 'لم يتم العثور على أي شخص',
'No Picture': 'لا وجود لصورة',
'No Population Statistics currently registered': 'لا توجد إحصائيات للسكان مسجلة حاليا',
'No Presence Log Entries currently registered': 'لا وجود لأي مدخل سجل حديثا',
'No Rapid Assessments currently registered': 'لا تقييمات سريعة مسجلة حاليا',
'No Received Items currently registered': 'ليست هناك عناصر مستلمة و مسجلة حاليا',
'No records found': 'لا توجد سجلات',
'No records matching the query': 'لا يوجد تسجيلات مطابقة للاستعلامات',
'No resources currently reported': 'لا يوجد اي مصادر مذكورة حاليا',
'No Rivers currently registered': 'لا أنهار مسجلة حاليا',
'No Rooms currently registered': 'لا توجد غرف مسجلة حاليا',
'No Scenarios currently registered': 'ليس هناك سيناريوهات مسجلة حاليا',
'No Sectors currently registered': 'لا توجد أي قطاعات مسجلة حاليا',
'No service profile available': 'لا تتوفر لمحة عن الخدمة',
'No Solutions currently defined': 'لاتوجد حلول معرفة حاليا',
'No Staff currently registered': 'لا يوجد أي موظفين مسجلين حاليا',
'No staff or volunteers currently registered': 'لا يوجد أي موظفين أو متطوعين مسجلين حاليا',
'No Staff Types currently registered': 'لا توجد أي أنواع للموظفين مسجلة حاليا',
'No status information available': 'لا توجد معلومات متوفرة عن الحالة',
'No Survey Template currently registered': 'لا يوجد أي قالب مسح مسجل حاليا',
'No synchronization': 'لا مزامنة',
'No Tasks with Location Data': 'لا يوجد مهام مرفوقة ببيانات الموقع',
'No Teams currently registered': 'لا فرق مسجلة حاليا',
'No template found!': 'لم يتم العثور على التصميم!',
'No units currently registered': 'لا توجد أي وحدات مسجلة حاليا',
'No Users currently registered': 'لا يوجد أعضاء مسجلين حاليا',
'No volunteer availability registered': 'لاوفرة لمتطوعين مسجلين',
'No Volunteers currently registered': 'لا متطوع مسجل حاليا',
'No Warehouses currently registered': 'لا توجد مستودعات مسجلة حاليا',
'Non-structural Hazards': 'مخاطر غير متعلقة بالبنية',
'Normal Job': 'العنوان الوظيفي',
'not accessible - no cached version available!': 'لا يمكن الوصول إليها --لا توجد نسخة متوفرة محفوظة!',
'Not Applicable': 'غير قابل للتطبيق',
'Not Authorized': 'غير مرخص',
'Not installed or incorrectly configured.': 'غير مثبت أو تم تكوينها بشكل غير صحيح.',
'Not Possible': 'غيرممكن',
'Not yet a Member of any Group': 'لا عضوية مسجلة حاليا',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'علما أن هذه القائمة تظهر فقط المتطوعين الناشطين. لرؤية جميع الأشخاص المسجلين في هذا النظام، إبحث من خلال هذه الشاشة بدلا من ذلك',
'Notice to Airmen': 'بلاغ للطيارين',
'Number of deaths during the past 24 hours.': 'عدد الوفيات خلال ال 24 ساعة الماضية.',
'Number of discharged patients during the past 24 hours.': 'عدد المرضى الذين تم إخراجهم في 24 ساعة الماضية',
'Number of Patients': 'عدد المرضى',
'Number of private schools': 'عدد المدارس الخاصة',
'Number of religious schools': 'عدد المدارس الدينية',
'Number/Percentage of affected population that is Female & Aged 0-5': 'رقم / النسبة المئوية للسكان المتضررين من الإناث التي تتراوح أعمارهن 0-5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'رقم/النسبة المئوية للسكان المتضررين من الإناث وتتراوح أعمارهن بين 13-17',
'Number/Percentage of affected population that is Female & Aged 26-60': 'رقم / النسبة المئوية للسكان المتضررين من الإناث اللائي سنهن بين 26-60 سنة',
'Number/Percentage of affected population that is Female & Aged 6-12': 'عدد/النسبة المئوية للسكان المتضررين من الإناث الذين سنهم بين 6-12',
'Number/Percentage of affected population that is Male & Aged 13-17': 'عدد/النسبة المئوية للسكان المتضررين الذكور الذين سنهم يتراوح بين 13-17',
'Number/Percentage of affected population that is Male & Aged 26-60': 'رقم /النسبة المئوية للسكان المتضررين و الذين هم ذكور وسنهم بين 26-60 سنه',
'Nutrition': 'التغذية',
'Nutrition problems': 'مشاكل التغذية',
'NZSEE Level 2': 'NZSEE المستوى 2',
'Obsolete': 'مهمل',
'obsolete': 'غير مفعل',
'Obstetrics/Gynecology': 'التوليد / أمراض النساء',
'OD Coordinator': 'منسقي التطوير المؤسسي',
'Office added': 'تمت اضافة المكتب',
'Office Phone': 'هاتف المكتب',
'Office Type': 'نوع المكتب',
'Office/Warehouse/Facility': 'مكتب / مخزن / منشأة',
'Offices': 'المكاتب',
'Offices & Warehouses': 'مكاتب ومستودعات',
'OK': 'موافق',
'Older people as primary caregivers of children': 'كبار السن كمقدمي الرعاية الأولية للأطفال',
'Older people participating in coping activities': 'مشاركة المسنين في أفضل الأنشطة',
'Older person (>60 yrs)': 'شخص مسن (>60 عام)',
'On by default?': 'مشغل افتراضيا؟',
'On by default? (only applicable to Overlays)': 'افتراضيا؟ (لا ينطبق إلا على الأغطية)',
'One time cost': 'تكلفة المرة الواحدة',
'One-time': 'مرة واحدة',
'One-time costs': 'تكاليف لمرة واحدة',
'Oops! Something went wrong...': 'عفوا! حدث خطأ ما ...',
'Open': 'فتح',
'open defecation': 'التغوط في العراء',
'Open recent': 'فتح الحديثة',
'Opening Times': 'وقت الافتتاح',
'OpenStreetMap (Humanitarian)': 'خرائط الطرق مباشرة (الانسانية)',
'OpenStreetMap (MapQuest)': 'خرائط الطرق مباشرة (الضيوف)',
'Operating Rooms': 'غرف العمليات',
'Optional': 'اختياري',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "اختياري. اسم geometry colum. في PostGIS افتراضيا هو 'the_geom'.",
'Organization': 'المنظمة',
'Organization added': 'تمت اضافة المنظمة',
'Organization Details': 'تفاصيل المنظمة',
'Organization/Branch': 'المنظمة / الفرع',
'Organization/Supplier': 'منظمة / المورد',
'Organizations': 'المنظمات',
'Organized By': 'تنظيم بواسطة',
'Origin': 'الأصل',
'Other (specify)': 'أخرى (حدد)',
'Other activities of boys 13-17yrs before disaster': 'أنشطة أخرى للفتيان في سن 13 - 17 سنة قبل الكارثة',
'Other activities of boys <12yrs': 'أنشطة أخرى للصبيان <12yrs',
'Other activities of girls 13-17yrs before disaster': 'أنشطة أخرى للفتيات ما بين 13-17 سنة قبل الكارثة',
'Other activities of girls<12yrs': 'نشاطات أخرى للفتيات<12سنة',
'Other alternative infant nutrition in use': 'تغذية الرضع البديلة الأخرى المستعملة',
'Other assistance, Rank': 'غيرها من المساعدات، الرتبة.',
'Other current health problems, adults': 'غيرها من المشاكل الصحية الراهنة ، كبار',
'Other Details': 'ملاحظات اخرى',
'Other events': 'أحداث أخرى',
'Other Evidence': 'غيرها من الأدلة',
'Other Faucet/Piped Water': 'صنبور أخرى / أنابيب المياه',
'Other Isolation': 'عازلات أخرى',
'Other Level': 'المستويات الاخرى',
'Other major expenses': 'نفقات رئيسية أخرى',
'Other school assistance, source': 'المساعدات المدرسية الأخرى ، مصدر',
'Other side dishes in stock': 'الأطباق الجانبية الأخرى في المخزون',
'Outbox': 'البريد الصادر',
'Overall Hazards': 'الأخطار الشاملة',
'Overlays': 'تراكب',
'Owned By (Organization/Branch)': 'مملوكة من قبل (منظمة / فرع)',
'Pack': 'مجموعة',
'Packs': 'حزم',
'Parent needs to be set for locations of level': 'يحتاج الأصل إلى أن يضبط من أجل مواقع المستوى',
'Participant': 'مشارك',
'Partner Organizations': 'المنظمات الشريكة',
'Partners': 'شركاء',
'Pashto': 'باشتون',
'Passport': 'جواز السفر',
'Password': 'كلمة المرور',
"Password fields don't match": 'حقلا كلمة المرور لا يتطابقان',
'Patients': 'المرضى',
'Pediatric ICU': 'وحدة العناية المركزة للأطفال',
'Pediatrics': 'طب الأطفال',
'People Needing Food': 'الأشخاص المحتاجون إلى الغذاء',
'People Needing Shelter': 'الأشخاص الذين يحتاجون إلى مأوى',
'People Needing Water': 'الأشخاص الذين يحتاجون إلى المياه',
'People Trapped': 'الناس المحاصرون',
'Person': 'شخص',
'Person 1': 'الشخص 1',
'Person 1, Person 2 are the potentially duplicate records': 'الشخص 1، الشخص 2 يحتمل أن تكون هي التسجيلات المكررة',
'Person added': 'تمت اضافة الشخص',
'Person deleted': 'تم حذف الشخص',
'Person Details': 'تفاصيل الشخص',
'Person interviewed': 'الشخص الذي تمت مقابلته',
'Persons': 'الأشخاص',
'Persons in institutions': 'الأشخاص في المؤسسات',
'Persons with disability (mental)': 'الأشخاص ذوي الإعاقة (العقلية)',
'Persons with disability (physical)': 'الأشخاص ذوي الإعاقة (الجسدية)',
'Phone': 'هاتف',
'Phone 1': 'هاتف 1',
'Phone 2': 'هاتف 2',
'Phone/Emergency': 'هاتف / طوارئ',
'Photo': 'الصورة',
'Photo deleted': 'تم حذف الصورة',
'Photo Details': 'تفاصيل الصورة',
'Photo Taken?': 'هل أُخذت الصورة ؟',
'Photo updated': 'تم تحديث الصورة',
'Photograph': 'صورة',
'Physical Description': 'الوصف الجسدي',
'Picture upload and finger print upload facility': ' تحميل الصور وسهولة تحميل بصمات الأصابع',
'PIN number ': 'رقم التعريف الشخصي PIN',
'pit': 'حفرة',
'pit latrine': 'حفرة المرحاض',
'PL Women': 'القانون العام الخاص بالنساء',
'Place': 'المكان',
'Place on Map': 'المكان على الخريطة',
'Places for defecation': 'أماكن للتغوط',
"Please come back after sometime if that doesn't help.": 'يرجى العودة بعد قليل إذا كان هذا لا يساعد.',
'Please enter a first name': 'من فضلك قم بادخال الاسم',
'Please enter a site OR a location': 'الرجاء إدخال الموقع أو مكان التواجد',
'Please enter a valid email address': 'يرجى إدخال عنوان بريد إلكتروني صالح',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'الرجاء إدخال بعض الحروف الأولى للشخص / مجموعة لإكمال ذاتي.',
'Please enter the recipient': 'الرجاء إدخال المستلم',
'Please fill this!': 'يرجى ملء هذا!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'يرجى تقديم عنوان الصفحةURL المشار إليها،واصفا ما توقعت حدوثه و ما يحدث حاليا',
'Please report here where you are:': 'يرجى تبيان مكان وجودك:',
'Please select': 'اختر من فضلك',
'Please select another level': 'يرجى اختيار مستوى آخر',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'يرجى تسجيل رقم هاتفك الخلوي لنتمكن من إرسال رسائل نصية.يرجى تضمين الرمزالكامل للمنطقة.',
'Pledge Support': 'ضمان الدعم',
'Point': 'نقطة',
'Poisoning': 'تسمم',
'Pollution and other environmental': 'تلوث بيئي وغيره',
'Population': 'السكان',
'Population Statistic added': 'تمت أضافة إحصاء السكان',
'Population Statistic deleted': 'تم حذف إحصاء السكان',
'Population Statistics': 'إحصاءات السكان',
'Porridge': 'عصيدة',
'Port Closure': 'غلق الميناء',
'Position Catalog': 'وظيفة الكاتالوج',
'Positions': 'مواقف',
'Postcode': 'الرمز البريدي',
'postponed': 'مؤجل',
'Power Failure': 'إنقطاع التيار الكهربائي',
'Powered by Sahana Eden': 'بدعم من عدن ساهانا',
'Precipitation forecast': 'توقع الامطار',
'Presence': 'تواجد',
'Priority': 'الأولوية',
'Privacy': 'خاص',
'Private': 'خاص',
'Problem Administration': 'إدارة المشكلة',
'Problem connecting to twitter.com - please refresh': 'هناك مشكلة في الاتصال بـ twitter.com - يرجى تحديث الصفحة',
'Problem deleted': 'مشكلة محذوفة',
'Problem Details': 'تفاصيل المشكلة',
'Problem Title': 'عنوان المشكلة',
'Process Shipment to Send': 'معالجة الشحنة التي سيتم ارسالها',
'Profile': 'ملف تعريف',
'Program': 'البرنامج',
'Program Hours': 'ساعات العمل',
'Programme Manager': 'مدير المبرمجين',
'Programs': 'برامج',
'Project': 'المشروع',
'Project Communities': 'مشاريع منضمات المجتمع المدني',
'Project deleted': 'تم حذف المشروع',
'Project Name': 'اسم المشروع',
'Project Officer': 'موظف المقترح',
'Project updated': 'تم تحديث المشروع',
'Projection': 'تقدير/تخطيط/اسقاط',
'Projects': 'المشاريع',
'Protection': 'حماية',
'Provide a password': 'توفير كلمة مرور',
'Provide Metadata for your media files': 'توفر بيانات وصفية لملفات الوسائط',
'Province': 'محافظة',
'Proxy-server': 'خادم الوكيل (بروكسي)',
'Psychiatrics/Adult': 'طب الامراض العقلية/ الراشدين',
'Public': 'عمومي',
'Public and private transportation': 'النقل العام و الخاص',
'Public assembly': 'تجمع شعبي',
'Public Event': 'حدث عام',
'Pull tickets from external feed': 'سحب تذاكر من تغذية خارجية',
'Punjabi': 'البنجابية',
'Purchase Date': 'تاريخ الشراء',
'Pyroclastic Surge': 'حمم بركانية',
'Quantity': 'كمية',
'Quantity Returned': 'الكمية المرتجعة',
'Quantity Sent': 'الكمية المرسلة',
'Queries': 'الاستعلامات',
'Radiological Hazard': 'المخاطر الإشعاعية',
'Radiology': 'الأشعة',
'Railway Hijacking': 'اختطاف قطار',
'Rain Fall': 'سقوط المطر',
'Rainfall - last 1 day (mm)': 'هطول الامطار - يوم واحد',
'Rainfall - last 10 days accumulated (mm)': 'هطول الامطار - اجمالي عشرة ايام',
'Rapid Assessment': 'تقييم سريع',
'Read-only': 'للقراءة فقط',
'Receive': 'استلام',
'Receive New Shipment': 'استلام شحنه جديده',
'Receive Shipment': 'تلقي شحنة',
'Receive this shipment?': 'استيلام هذه الشحنة؟',
'Received': 'استلم (ت)',
'Received By': 'استلمت بواسطة',
'Received Shipment canceled': 'إلغاء الشحنة المستلمة',
'Received Shipment updated': 'تم تحديث الشحنة المتلقاة',
'Received Shipments': 'الشحنات المستلمة',
'Received/Incoming Shipments': 'الاستقبال / الشحنات الواردة',
'Recommendation Letter Types': 'نوع شكر والتقدير',
'Recommendations for Repair and Reconstruction or Demolition': 'توصيات لإصلاح وإعادة بناء أو هدم',
'RECORD A': 'تسجيل أ',
'Record added': 'تمت اضافة التسجيل',
'RECORD B': 'تسجيل ب',
'Record Details': 'تفاصيل التسجيل',
'record does not exist': 'سِجل غير موجود',
'Record Saved': 'تم حفظ السجل',
'Recovery Request deleted': 'حُذف طلب الإسترجاع',
'Recurring': 'متكرر',
'Recurring costs': 'التكاليف المتكررة',
'Recurring Request?': 'طلب متكرر؟',
'Red Cross / Red Crescent': 'الصليب الأحمر/ الهلال الأحمر',
'Red Cross & Red Crescent National Societies': 'جمعيات الهلال والصليب الاحمر',
'Refresh Rate (seconds)': 'معدل الإنعاش(ثواني)',
'Region': 'المنطقة',
'Regional': 'الإقليمية',
'Register': 'تسجيل',
'Register As': ' تسجيل بـأسم',
'Register for Account': 'التسجيل الحساب',
'Register Person': 'تسجبل شخص',
'Registration': 'تسجيل',
'Registration added': 'تمت اضافة التسجيل',
'Registration Details': 'تفاصيل التسجيل',
'Registration updated': 'تم تحديث التسجيل',
'Reinforced masonry': 'بناء معزز',
'Rejected': 'مرفوض',
'Religion': 'الديانة',
'Religious': 'دينية',
'Remember Me': 'تذكرني',
'Remove': 'أزال',
'Remove existing data before import': 'حذف البيانات الحالية قبل الادخال',
'Remove Facility from this event': 'إزالة مرفق من هذا الحدث',
'Remove Human Resource from this scenario': 'إزالة الموارد البشرية من هذا السيناريو',
'Remove Map Profile from this event': 'إزالة تكوين خريطة من هذا الحدث',
'Remove Map Profile from this scenario': 'حذف ضبط الخريطة من هذا السيناريو',
'Repaired': 'تم اصلاحه',
'Repeat your password': 'كرر كلمة السر الخاصة بك',
'Replace if Newer': 'استبدال إذا كان هناك تحديث',
'Report': 'تقرير',
'Report added': 'تمت اضافة التقرير',
'Report deleted': 'إلغاء التقرير',
'Report Details': 'تفاصيل التقرير',
'Report my location': 'تقريرعن موقعي',
'Report Options': 'خيارات التقرير',
'Report them as found': 'قررعنهم كما وجدوا',
'Report updated': 'تم تحديث التقرير',
'Reported By': 'اعداد',
'Reporter': 'مقرر',
'Reporter Name': 'اسم المراسل',
'Reports': 'تقارير',
'Request added': 'أُضيف الطلب',
'Request Added': 'تم إضافة الطلب',
'Request Item added': 'تم إضافة عنصر الطلب',
'Request Item deleted': 'تم حذف الطلب',
'Request Items': 'طلب الوحدات',
'Request password reset': 'أعادة طلب تعيين كلمة المرور',
'Requested by': 'مطلوب من',
'Requested From': 'طلب من',
'Requested Items': 'العناصر المطلوبة',
'Requester': 'الطالب',
'Requests': 'المطالب',
'Required Fields': 'الحقول المطلوبة',
'Rescue and recovery': 'الإنقاذ و الإنعاش',
'Reset': 'إعادة تعيين',
'Resolve': 'حل',
'Resource': 'المورد',
'Resource added': 'تم إضافة المصادر',
'Resource Details': 'تفاصيل عن الموارد',
'Resource updated': 'تم تحديث الموارد',
'Resources': 'الموارد',
'Restricted Access': 'دخول مقيد',
'Restricted Use': 'استخدام محدد',
'Retail Crime': 'تجزئة الجريمة',
'retired': 'متقاعد',
'Retrieve Password': 'استرجاع كلمة السر',
'retry': 'إعادة المحاولة',
'Return': 'عودة',
'Return to Request': 'العودة الى الطلب',
'Returned': 'تمت العودة',
'Returned From': 'عاد من',
'Rice': 'أرز',
'Riot': 'شغب',
'River deleted': 'تم حذف النهر',
'River Details': 'تفاصيل النهر',
'River updated': 'تم تحديث النهر',
'Rivers': 'الأنهار',
'Road Accident': 'حادث سير',
'Road Conditions': 'أحوال الطريق',
'Road Delay': 'تأخيرالطريق',
'Road Usage Condition': 'حالة استخدام الطريق',
'Role': 'الدور',
'Role deleted': 'تم حذف الدور',
'Role Details': 'تفاصيل الدور',
'Role Updated': 'تم تحديث الدور',
'Role updated': 'تم تحديث الدور',
'Roles': 'الأدوار',
'Roof tile': 'قرميد السقف',
'Room': 'غرفة',
'Room Details': 'تفاصيل الغرفة',
'Rooms': 'غرف',
'Run Functional Tests': 'تشغيل الاختبارات الوظيفية',
'Safety of children and women affected by disaster?': 'هل تضررت سلامة الأطفال والنساء من الكارثة؟',
'Sahana Eden <=> Other': 'ساهانا عدن <=> أخرى',
'Sahana Eden <=> Sahana Eden': 'ساهانا عدن <=> ساهانا عدن',
'Sahana Eden Website': 'موقع ساهانا عدن',
'Saturday': 'السبت',
'Save': 'حفظ',
'Saved.': 'تم الحفظ.',
'Saving...': 'جاري الحفظ...',
'Scale of Results': 'جدول النتائج',
'Scenario': 'السيناريو',
'Scenario deleted': 'تم حذف السيناريو',
'Scenario Details': 'تفاصيل السيناريو',
'Scenario updated': 'تم تحديث السيناريو',
'Scenarios': 'سيناريوهات',
'School': 'المدرسة',
'School activities': 'الأنشطة المدرسية',
'School assistance': 'المساعدات المدرسية',
'School attendance': 'الحضورالمدرسي',
'School Closure': 'اختتام المدرسة',
'Sea Level: Rise of 2m': 'مستوى البحر : ارتفاع ٢م',
'Search': 'ابحث',
'Search Activity Report': 'بحث عن تقرير نشاط',
'Search Asset Log': 'بحث عن مدخل الضبط',
'Search Baseline Type': 'البحث في نوع القاعدة',
'Search Certifications': 'البحث عن الشهادات',
'Search Commitment Items': 'البحث عن عناصر الالتزامات',
'Search Contact Information': 'بحث عن معلومات الاتصال',
'Search Contacts': 'البحث عن إتصالات',
'Search Courses': 'بحث عن الدروس',
'Search Credentials': 'البحث عن أوراق الإعتماد',
'Search Documents': 'بحث عن وثائق',
'Search Feature Class': 'البحث في خصائص الفئة',
'Search for a Person': 'البحث عن شخص',
'Search for a shipment by looking for text in any field.': 'البحث عن الشحنة بالإطلاع على النص في أي مجال.',
'Search for a shipment received between these dates': 'بحث عن شحنة واردة بين هذه التواريخ',
'Search for Staff or Volunteers': 'البحث عن موظفين أو متطوعين',
'Search for warehouse by organization.': 'البحث عن مستودع حسب المنظمات.',
'Search Groups': 'بحث الأفواج',
'Search here for a person record in order to:': 'إبحث هنا عن سجل الشخص من أجل:',
'Search Human Resources': 'البحث عن الموارد البشرية',
'Search Impacts': 'البحث عن الآثار',
'Search Incident Reports': 'البحث عن تقارير الحوادث',
'Search Item Categories': 'البحث عن تصنيفات العنصر',
'Search Keys': 'البحث عن مفاتيح',
'Search Layers': 'بحث عن طبقات',
'Search Level 1 Assessments': 'تقييمات بحث المستوى1',
'Search Level 2 Assessments': 'البحث في تقييمات المستوى 2',
'Search location in Geonames': 'موقع البحث في الأسماء الجغرافية',
'Search Log Entry': 'بحث سجل الدخول',
'Search Map Profiles': 'البحث عن تكوينات الخريطة',
'Search Members': 'البحث عن الاعضاء',
'Search Membership': 'البحث عن عضوية',
'Search messages': 'بحث عن رسائل',
'Search Offices': 'بحث المكاتب',
'Search Organizations': 'ابحث عن منظمات',
'Search Photos': 'البحث عن صور',
'Search Population Statistics': 'البحث عن الإحصاءات السكانية',
'Search Positions': 'بحث وظائف',
'Search Projections': 'بحث التوقعات',
'Search Received Items': 'بحث العناصر المستلمة',
'Search Received Shipments': 'البحث عن الشحنات المتلقاة',
'Search Records': 'البحث في السجلات',
'Search Registations': 'البحث عن تسجيلات',
'Search Registration Request': 'ابحث عن طلب تسجيل',
'Search Request': 'البحث عن طلب',
'Search Request Items': 'بحث عناصرالطلب',
'Search Roles': 'ابحث عن أدوار',
'Search Rooms': 'البحث عن الغرف',
'Search Scenarios': 'ابحث عن سيناريوهات',
'Search Sent Items': 'البحث عن العناصر المرسلة',
'Search Settings': 'إعدادات البحث',
'Search Shipped Items': 'بحث مفردات الشحنة',
'Search Skill Equivalences': 'البحث عن مهارات معادلة',
'Search Solutions': 'بحث حلول',
'Search Subscriptions': 'البحث عن الإشتراكات',
'Search Tasks': 'بحث عن مهام',
'Search Teams': 'البحث عن فرق',
'Search Themes': 'بحث الموضوعات',
'Search Tickets': 'البحث عن التذاكر',
'Search Tracks': 'البحث عن مسارات',
'Search Training Participants': 'البحث عن المشاركين في الدورات',
'Search Units': 'بحث الوحدات',
'Search Users': 'بحث عن المستخدمين',
'Search Warehouses': 'البحث عن مستودعات',
'Searching for different groups and individuals': 'البحث عن مجموعات وأفراد مختلفين',
'Seconds must be a number between 0 and 60': 'الثواني يجب أن تكون بين العدد 0 و 60',
'Secretary General': 'السكرتير العام',
'Section deleted': 'تم حذف القسم',
'Section Details': 'تفاصيل الباب',
'Sections': 'الفروع',
'Sector': 'القطاع',
'Sector added': 'القطاع المضاف',
'Sector deleted': 'تم حذف القطاع',
'Sector updated': 'تم تحديث القطاع',
'Sectors': 'القطاعات',
'Security': 'الامن',
'Security Officer': 'موظف الامن',
'Security problems': 'المشاكل الأمنية',
'See all': 'عرض الكل',
'see comment': 'انظر التعليق',
'Select': 'أختيار',
'Select a location': 'حدد موقعا',
'Select a question from the list': 'إختر سؤالا من القائمة',
'Select All': 'أختيار الكل',
'Select all that apply': 'إختر كل ما ينطبق',
'Select the person assigned to this role for this project.': 'حدد الشخص المعين لهذا الدور لهذا المشروع.',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "اختر هذا إذا كانت كافة المواقع المحددة تحتاج إلى أصل على أعمق مستوى من التسلسل الهرمي للموقع. على سبيل المثال، إذا كان 'حي' هو أصغر تقسيم في التسلسل الهرمي، فسوف يتم الزام جميع المواقع المحددة أن تكون 'المنطقة' أصلا لها.",
'Select to show this configuration in the Regions menu.': 'حدد لإظهار هذا التكوين في قائمة المناطق.',
'selected': 'أختيار',
'Send': 'إرسال',
'Send a message to this person': 'إبعث رسالة إلى هذا الشخص',
'Send a message to this team': 'إرسال رسالة إلى هذا الفريق',
'Send New Shipment': 'إرسال شحنة جديدة',
'Send Shipment': 'ارسال الشحنة',
'Senior (50+)': 'كبار (+ 50)',
'Sent By Person': 'أرسلت من قبل شخص',
'Sent Item deleted': 'تم حذف العنصر المُرسل',
'Sent Item Details': 'تفاصيل العناصر المرسلة',
'Sent Item updated': 'نم تحديث المادة',
'Sent Shipment Details': 'تفاصيل الشحنة المرسلة',
'Sent Shipments': 'الشحنات المرسلة',
'separated': 'منفصل',
'Separated children, caregiving arrangements': 'الأطفال المنفصلين عن ذويهم ، ترتيبات الرعاية',
'separated from family': 'انفصل عن الأسرة',
'Serial Number': 'الرقم التسلسلي',
'Server': 'الموزع',
'Service Catalog': 'كتالوج الخدمة',
'Service or Facility': 'خدمة أو مرفق',
'Service profile updated': 'تم تحديث بروفايل الخدمة',
'Service Record': 'خلاصه المتطوع',
'Services': 'الخدمات',
'Services Available': 'الخدمات المتوفرة',
'Setting updated': 'تم تحديث الإعداد',
'Settings': 'إعدادات',
'Settings updated': 'تم تحديث الإعدادات',
'Severe': 'صعب',
'Severity': 'قسوة',
'Sex': 'جنس',
'Share a common Marker (unless over-ridden at the Feature level)': 'يشتركون في واصمة (علامة) مشتركة (ما لم يلغى على مستوى التقييم)',
'shaved': 'تم إزالته',
'Shelter': 'مأوى',
'Shelter & Essential NFIs': 'المأوى والمواد غير الغذائية الأساسية (NFIs)',
'Shelter added': 'تمت اضافة المأوى',
'Shelter deleted': 'تم حذف المأوى',
'Shelter Details': 'تفاصيل الملجأ',
'Shelter Service Details': 'تفاصيل خدمات المأوى',
'Shelter Services': 'خدمات المأوى',
'Shelter Type updated': 'تم تحديث نوع الملجا',
'Shelter Types': 'أنواع الملاجئ',
'Shipment Items': 'عناصر الشحن',
'Shipment Items received by Inventory': 'عناصر الشحنة التي وردت في الجرد',
'Shipment Type': 'نوع الشحنة',
'Shipments To': 'الشحنات إلى',
'Shooting': 'إطلاق نار',
'short': 'قصير',
'Short Title / ID': 'الاسم المختصر',
'Showing': 'يعرض',
'Show': 'عرض',
'Show Details': 'إظهار التفاصيل',
'Show Map': 'عرض الخريطة',
'Show on map': 'أظهر على الخريطة',
'Show Region in Menu?': 'إظهار منطقة في القائمة؟',
'Showing _START_ to _END_ of _TOTAL_ entries': 'عرض _START_ الى _END_ من _TOTAL_ المدخلات',
'sign-up now': 'سجل الآن',
'Sign-up succesful - you should hear from us soon!': 'تسجيل ناجح - من المفروض أن الرد سيكون عن قريب!',
'single': 'أعزب',
'Site': 'موقع',
'Site or Location': 'الموقع أو المكان',
'Sketch': 'رسم تخطيطي',
'Skill': 'المهارة',
'Skill added': 'تمت اضافة المهارة',
'Skill Catalog': 'انواع المهارات',
'Skill Details': 'تفاصيل المهارة',
'Skill Equivalence added': 'تمت اضافة مهارة معادلة',
'Skill Equivalence updated': 'تم تحديث المهارة المعادلة',
'Skill Provision': 'توفير المهارات',
'Skill Provision added': 'تمت اضافة توفر المهارة',
'Skill Provision Catalog': 'كاتالوج توفيرالمهارات',
'Skill Provision deleted': 'تم حذف توفيرالمهارة',
'Skill Provision Details': 'تفاصيل اعتماد المهارة',
'Skill Provisions': 'توفر المهارة',
'Skill updated': 'تم تحديث المؤهلات',
'Skills': 'المهارات',
'Skills Catalog': 'دليل المهارات',
'slim': 'نحيل',
'Slope failure, debris': 'إنزلاق التربة، حُطام',
'Snapshot': 'لقطة',
'Snow Squall': 'عاصفة ثلجية',
'Soil bulging, liquefaction': 'انتفاخ أو سيلان التربة',
'Solid waste': 'النفايات الصلبة',
'Solution': 'حل',
'Solution added': 'تمت اضافة الحل',
'Solution deleted': 'تم حذف الحل',
'Solution Details': 'تفاصيل الحل',
'Sorry that location appears to be outside the area supported by this deployment.': 'آسف يظهرهذا المكان خارج المنطقة التي يدعمها هذا النشر.',
'Sorry, I could not understand your request': 'آسف ، لا يمكنني فهم طلبك',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'عذرا ، يسمح فقط للمستخدمين ذوي دور MapAdmin لإنشاء مجموعات مواقع.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'عذرا! يسمح فقط للمستخدمين الذين لهم دور MapAdmin لتحرير هذه المواقع',
'Sorry, that service is temporary unavailable.': 'عذرا، هذه الخدمة غير متوفرة مؤقتا.',
'Sorry, there are no addresses to display': 'عذرا، لا توجد أية عناوين للعرض',
"Sorry, things didn't get done on time.": 'عذرا ، لم يتم فعل الأشياء في الوقت المناسب.',
"Sorry, we couldn't find that page.": 'آسف،لم نتمكن من ايجاد تلك الصفحة',
'Source': 'مصدر',
'Sources of income': 'مصادر الدخل',
'Space Debris': 'حطام فضائي',
'Spanish': 'الاسبانية',
'Special Ice': 'ثلج خاص',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'منطقة محددة (مثل مبنى/غرفة) في المكان الذي يرى فيه هذا الشخص/الفريق.',
'specify': 'حدد',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'تحديد عدد الوحدات (لتر)رينغر -اللاكتات أو ما يعادلها من الحلول المحتاج اليها في 24سا',
'Spreadsheet uploaded': 'تم رفع الجدول',
'Staff': 'العاملين',
'staff': 'الموظفين',
'Staff 2': 'الموظفين 2',
'Staff & Volunteers (Combined)': 'مجاميع الموظفين والعاملين',
'Staff added': 'تمت اضافة الموظفين',
'Staff and Volunteers': 'الموظفين والمتطوعين',
'Staff deleted': 'إقالة الموظفين',
'Staff ID': 'رقم الموظف',
'Staff Member added': 'تمت الاضافه بنجاح',
'Staff Member Details': 'بيانات الموظف',
'Staff Members': 'أعضاء الهيئة',
'Staff Record': 'سجل الموظفين',
'Staff Report': 'تقرير الموظفين',
'Staff Type added': 'تم إضافة نوع من الموظفين',
'Staff Type deleted': 'تم حذف نوع الموظفين',
'Staff Type Details': 'تفاصيل نوع الموظفين',
'Staff Type updated': 'تم تحديث نوع الموظفين',
'Staff with Contracts Expiring in the next Month': 'الموظفين بعقود تنتهي في الشهر القادم',
'Start Date': 'تاريخ البدء',
'Start of Period': 'بداية الفترة/المرحلة',
'state': 'الدولة',
'Status': 'الحالة',
'Status of general operation of the facility.': 'حالة التشغيل العام للمرفق.',
'Status of morgue capacity.': 'قدرة إستيعاب غرفة حفظ الجثث.',
'Status of operations of the emergency department of this hospital.': 'حالة العمليات لقسم الطوارئ في هذا المستشفى.',
'Status of the operating rooms of this hospital.': 'حالة غرف العمليات لهذا المستشفى.',
'Status Report': 'تقرير الحالة',
'Status updated': 'تم تحديث الوضع',
'Steel frame': 'الإطار الصلب',
'Stock': 'المخزون',
'Stock in Warehouse': 'السندات المستودع',
'Stocks and relief items.': 'مخازن والمواد الاغاثية',
'Stowaway': 'مهاجر غير شرعي',
'Street Address': 'عنوان السكن',
'Strong Wind': 'ريح قوية',
'Structural': 'بنائيّ',
'Sub-type': 'النوع الفرعي',
'Subject': 'الموضوع',
'Submission successful - please wait...': 'تمت العملية بنجاح -- يرجى الانتظار...',
'Submit': 'حفظ',
'Submit a request for recovery': 'تقديم طلب لاسترداد',
'Submit New (full form)': 'أرسل جديد (نموذج كامل)',
'Submit new Level 1 assessment (full form)': 'تقديم تقييم مستوى 1 جديد (نموذج كامل)',
'Submit new Level 1 assessment (triage)': 'تقديم تقييم جديد للمستوى 1 (فرز)',
'Submit new Level 2 assessment': 'أرسل تقييم جديد للمستوى الثاني',
'Subscriptions': 'الاشتراكات',
'Subsistence Cost': 'كلفة المادة',
'Suburb': 'ضاحية',
'suffered financial losses': 'الخسائر المالية',
'Summary': 'موجز',
'Sunday': 'الأحد',
'Supplier/Donor': 'الموردون/ المانحون',
'Suppliers': 'المجهزين',
'Supplies': 'لوازم',
'Support Requests': 'طلبات الدعم',
'Surgery': 'جراحة',
'Survey Answer deleted': 'جواب الاستمارة حذف',
'Survey Question deleted': 'تم حذف أسئلة الدراسة',
'Survey Question Details': 'تفاصيل أسئلة المسح',
'Survey Series': 'سلاسل المسح',
'Survey Series deleted': 'تم حذف سلسلة الدراسة',
'Survey Series Details': 'تفاصيل سلسلة المسح',
'Survey Template': 'قالب المسح',
'Survey Template added': 'تم إضافة قالب المسح',
'Survey Template Details': 'تفاصيل قالب المسح',
'Symbology': 'استعمال الرموز',
'Sync Conflicts': 'صراعات المزامنة',
'Sync History': 'تاريخ التزامن',
'Sync Settings': 'إعدادات المزامنة',
'Synchronization': 'مزامنة',
'Synchronization Details': 'تفاصيل المزامنة',
'Synchronization History': 'تاريخ التزامن',
'Synchronization mode': 'نمط المزامنة',
'Synchronization not configured.': 'لم يتم تعديل التزامن.',
'Synchronization Settings': 'أعدادات المزامنة',
"System's Twitter account updated": 'تم تحديث حساب للنظام على twitter',
'Table': 'جدول',
'tall': 'طويل',
'Task added': 'تمت اضافة المهمة',
'Task Details': 'تفاصيل المهمة',
'Team added': 'تمت إضافة الفريق',
'Team Description': 'وصف الفريق',
'Team Details': 'تفاصيل الفريق',
'Team ID': 'هوية الفريق',
'Team Members': 'أعضاء الفريق',
'Team Name': 'اسم الفريق',
'Team Type': 'نوع الفريق',
'Teams': 'فرق',
'Telephone': 'الهاتف',
'Telephony': 'الاتصالات الهاتفية',
'Template Name': 'اسم نموذج التقيم',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'اسم المستوى الخامس داخل التقسيم الإداري للبلاد (مثل تقسيم الرمز البريدي). هذا المستوى لا يستخدم غالبا.',
'Term for the secondary within-country administrative division (e.g. District or County).': 'المصطلح الثانوي في التقسيم الإداري داخل للبلد(مثال:محافظةأو دولة)',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'مصطلح للمستوى الثالث في التقسيم الإداري للبلد(مثال:مدينةأو بلدة)',
'Terrorism': 'إرهاب',
'Text': 'النص',
'Thanks for your assistance': 'شكرا لكم على المساعدة',
'The Area which this Site is located within.': 'المنطقة التي يقع فيها هذا الموقع.',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'نماذج تقيم لمخازن وتسمح بالاستجابه لهذه التقيم لاحداث محددة لتجمع وتحلل',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'وحدة تقييم المباني يسمح بتقييم سلامة المباني، على سبيل المثال بعد وقوع زلزال.',
'The contact person for this organization.': 'الشخص المكلف بالتواصل في هذه المنظمة.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'الموقع الحالي للفريق / الشخص، والذي يمكن ان يكون عام (للتقرير) أو دقيق (للعرض على خريطة). أدخل بعض الحروف للبحث من المواقع المتوفرة.',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "المانحون لهذا المشروع. يمكن تحديد قيم متعددة بضغط مفتاح 'المراقبةl' .",
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'عنوان البريد الإلكتروني الذي ترسل اليه طلبات الموافقة (عادة ما يكون هذا البريد لفريق بدلا من فرد). إذا كان الحقل فارغا فسوف تتم الموافقة على الطلبات تلقائيا إذا كان المجال موافقا.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'الإبلاغ عن الحوادث (Incident Reporting System) نظام يسمح للجمهور العام بتقريرالحوادث و تتبع هذه الأخيرة.',
'The language you wish the site to be displayed in.': 'اللغة التي ترغب ان يتم عرض الموقع فيها.',
'The list of Brands are maintained by the Administrators.': 'يقوم المسؤولون بالاحتفاظ بقائمة العلامات التجارية.',
'The list of Catalogs are maintained by the Administrators.': 'يقوم المسؤولون بالاحتفاظ بقائمة السجلات.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'الموقع الذي يذهب اليه الشخص يمكن ان يكون عاما (للتقرير) او محددا (للعرض على خريطة). قم بإدخال بعض الحروف للبحث في المواقع المتوفرة.',
'The map will be displayed initially with this latitude at the center.': 'ابتدائيا سيتم عرض الخريطة في المركز على خط العرض هذا.',
'The map will be displayed initially with this longitude at the center.': 'سيتم عرض الخريطة في البداية مع هذا العرض في المركز.',
'The Media Library provides a catalog of digital media.': 'توفر مكتبة وسائل الإعلام كتالوج لوسائل الإعلام الرقمية.',
'The name to be used when calling for or directly addressing the person (optional).': 'استخدام الاسم عند طلبه أو مخاطبة الشخص مباشرة (اختياري)',
'The next screen will allow you to detail the number of people here & their needs.': 'الشاشة التالية سوف تسمح لك بتفصيل عدد الناس هنا واحتياجاتهم.',
'The post variable on the URL used for sending messages': 'البريد متغير حسب العنوان URLالمستعمل لإرسال الرسائل.',
'The post variables other than the ones containing the message and the phone number': 'متغيرات أخرى غير تلك التي تحتوي على رسالة ورقم الهاتف',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'وحدة تعقب المشروع تتيح خلق أنشطة لسد الثغرات في عملية تقييم الاحتياجات.',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'المنفذ التسلسلي أين يتم توصيل المودم - / dev/ttyUSB0 ،إلخ على linux و COM2 ، COM1 ، الخ في نظام التشغيل Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'لم يستقبل الخادم إجابة في الوقت المناسب من الخادم الآخر الذي كان يسعى للوصول لملئ طلب على يد متصفح.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'تلقى الخادم استجابة غير صحيحة من خادم آخر أنه كان داخلا لملء طلب من المتصفح.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'يتبع سجل كافة الملاجئ ويخزن التفاصيل الأساسية المتعلقة بهم.بالتعاون مع وحدات أخرى لتعقب الأشخاص المتعاونين مع ملجأ أخر،و توفر الخدمات إلخ',
'The Shelter this Request is from (optional).': 'المأوى الذي جاء منه هذا الطلب (اختياري).',
'The unique identifier which identifies this instance to other instances.': 'المعرف الوحيد الذي يحدد هذه الحالة إلى حالات أخرى.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': ' العنوانURL لحصول على قايلية صفحة خدمة شبكة الخريطة WMS التي تتوفر على الطبقات التي ترغب فيها عبر لوحة التصفح على الخريطة.',
'The URL of your web gateway without the post parameters': 'عنوان مدحل موقع الويب الخاص بك دون وضع سمات أخرى',
'The URL to access the service.': 'عنوان الموقع للوصول إلى الخدمة.',
'The way in which an item is normally distributed': 'الطريقة التي يتم بها عادة توزيع العنصر',
'Theme': 'الموضوع',
'Theme Details': 'تفاصيل الموضوع',
'Themes': 'المواضيع',
'There are insufficient items in the Inventory to send this shipment': 'لا توجد وحدات كافية في المخزون لإرسال هذه الشحنة',
'There are multiple records at this location': 'هناك سجلات متعددة في هذا الموقع',
'These are settings for Inbound Mail.': 'هذه هي الإعدادات للبريد الوارد.',
'This appears to be a duplicate of ': 'يظهر أن هذا مكرر لـ',
'This file already exists on the server as': 'هذا الملف موجود مسبقا على الملقم (server) ك',
'This Group has no Members yet': 'لا يوجد أي أعضاء مسجلين حاليا',
'This level is not open for editing.': 'هذا المستوى غير مفتوح من أجل التحرير.',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'لم تستقبل هده الشحنة.و.لم يتم الغائهاا لان لاتزال امكانية تحريرها',
'This shipment will be confirmed as received.': 'سيتم تأكيد هذه الشحنة كما سُلمت',
'This Team has no Members yet': 'لا يوجد أي أعضاء مسجلين حاليا',
'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'تضيف هذه القيمة مقدار صغير من المسافة خارجا النقاط. بدون هذا، فإن أبعد النقاط تكون على المربع المحيط، وربما لا تكون مرئية.',
'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'هده القيمة تعطي عرض ادنى و ارتفا ع في درجات المنطقة المبينة.فمن دون دلك فتبين الخريطة نقطة واحدة لن تظهر اي مدى حولها.بعده يتم عرض الخريطة كما يمكن تكبيرهاعلى النحو المرغوب فيه.',
'Thursday': 'الخميس',
'Ticket': 'نذكرة',
'Ticket deleted': 'تم حذف التذكرة',
'Ticket Details': 'تفاصيل عن التذكرة',
'Ticket updated': 'تم تحديث التذكرة',
'Ticketing Module': 'وحدة التذاكر',
'Time In': 'وقت الدخول',
'Time Out': 'وقت الخروج',
'Time Question': 'وقت السؤال',
'Timeline Report': 'تقريرالجدول الزمني',
'To begin the sync process, click the button on the right => ': 'لبدء عملية المزامنة ، انقر فوق الزر الموجود على اليمين =>',
'To create a personal map configuration, click ': 'لإنشاء تكوين خريطة شخصي، أنقر',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'لتحرير OpenStreetMap ، تحتاج إلى تعديل إعدادات OpenStreetMap في models/000_config.py',
'To Organization': 'الى المنظمة',
'To Person': 'إلى شخص',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "للبحث عن طريق اسم الشخص ، أدخل أي من الأسماء الأولى أو الوسط أو آخر، مفصولة بفراغ. يمكن لك استخدام ٪ كجوكير. انقر على 'بحث' دون إدخال قائمة جميع الأشخاص.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "للبحث عن مستشفى ، أدخل أي من أسماء أو معرفات للمستشفى ، أو اسم المنظمة أو أوائل حروف الكلمات ، مفصولة بمسافات. يجوز لك استخدام ٪ كجوكير. اضغط 'بحث' دون إدخال جميع المستشفيات إلى القائمة.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "للبحث عن مكان ما، أدخل الاسم. يجوز لك استخدام ٪ كجوكير إضغط على 'بحث' دون إدخاله في جميع قائمة لمواقع.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "للبحث عن شخص ما ، أدخل أي من الأسماء الأولى أو المتوسطة و/أو رقم الهوية للشخص، مفصولة بمسافات. يجوز لك استخدام ٪ كجوكير. اضغط 'بحث' دون ادخال جميع الأشخاص إلى القائمة.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "للبحث عن التقييم ، أدخل أي جزء من عدد التذاكر للتقييم. يجوز لك استخدام ٪ كجوكير. اضغط 'بحث' دون إدخال جميع التقييمات إلى القائمة.",
'Tools': 'أدوات',
'Tornado': 'إعصار',
'total': 'الكل',
'Total': 'الكل',
'Total # of Target Beneficiaries': 'مجموع # من المستفيدين المستهدفين',
'Total Annual Budget': 'اجمالي الميزانية السنوية',
'Total Beds': 'مجموع الأسِرة',
'Total Beneficiaries': 'مجموع المستفيدين',
'Total Cost': 'الكلفة الاجمالية',
'Total Funding Amount': 'اجمالي المقدار المالي',
'Total gross floor area (square meters)': 'مجموع المساحة الإجمالية للمنطقة(المتر المربع)',
'Total Monthly Cost': 'التكلفة الإجمالية الشهرية',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'إجمالي عدد الأسِرة في هذا المستشفى. يتم تحديثها تلقائيا من تقارير يومية.',
'Total number of houses in the area': 'مجموع المنازل في المنطقة',
'Total number of schools in affected area': 'اجمالي عدد المدارس في المنطقة المتأثرة',
'Total Persons': 'مجموع الأشخاص',
'Total Recurring Costs': 'مجموع التكاليف المتكررة',
'Total Unit Cost': 'مجموع تكلفة الوحدة',
'Total Volume (m3)': 'الحجم الكلي (م٣)',
'Total Weight (kg)': 'الوزن الكلي ( كيلوغرام )',
'Totals for Budget': 'مجموع الميزانية',
'Track Details': 'تفاصيل المسار',
'Tracking and analysis of Projects and Activities.': 'متابعة وتحليل المشاريع والنشاطات',
'Tracking of basic information on the location, facilities and size of the Shelters': 'أخذ معلومات أساسية عن الموقع, والتسهيلات وحجم الملاجئ',
'Tracking of Projects, Activities and Tasks': 'تتبع الأنشطة والمشاريع والمهام',
'Tracks': 'المسارات',
'Training': 'تدريب',
'Training Course Catalog': 'فهرس منهاج الدورة',
'Training Courses': 'منهاج الدورة',
'Training Event Details': 'تفاصيل عمل التدريب',
'Training Events': 'احداث الدورات التدريبية',
'Training Facility': 'الاستاذ المدرب',
'Training Report': 'تقرير الدورة',
'Training updated': 'تم تحديث التدريب',
'Trainings': 'الدورات التدريبية',
'Transit Status': 'طريق النقل',
'Transport Reference': 'مصادر النقل',
'Transportation assistance, Rank': 'نقل المساعدة ، الوضع',
'Transported by': 'نقل بواسطة',
'Tropical Storm': 'عاصفة استوائية',
'Tropo Settings': 'تغيير الإطارات',
'Tropo settings updated': 'تم تحديث إعدادات Tropo',
'Truck': 'شاحنة',
'Tsunami': 'تسونامي',
'Tuesday': 'الثلاثاء',
'Type': 'النوع',
'Type of Construction': 'نوع البناية',
'Type of Transport': 'نوع النقل',
'Un-Repairable': 'لا يمكن اصلاحه',
'Understaffed': 'ناقص',
'unidentified': 'مجهول الهوية',
'Unidentified': 'مجهول الهوية',
'Unit': 'الوحدة',
'Unit Cost': 'تكلفة الوحدة',
'Unit of Measure': 'وحدة القياس',
'Units': 'وحدات',
'Unknown': 'غير معروف',
'unknown': 'غير معروف',
'Unknown Peer': 'زميل غير معروف',
'Unknown type of facility': 'نوع غير معروف من المرافق',
'Unreinforced masonry': 'بناء غير مدعوم',
'Unselect to disable the modem': 'إلغاء الإختيار لتعطيل المودم',
'Unsent': 'غير مرسلة',
'unspecified': 'غير محدد',
'unverified': 'لم يتم التحقق منها',
'Update Cholera Treatment Capability Information': 'تحديث معلومات القدرة على علاج الكوليرا',
'Update if Newer': 'قم بالتحديث إذا كان الأحدث',
'Update Method': 'Update Method',
'Update Policy': 'Update Policy',
'Update Request': 'تحديث الطلب',
'Update Service Profile': 'تحديث خدمة البيانات الشخصية',
'Update Unit': 'تحديث وحدة',
'Update your current ordered list': 'تحديث القائمة المرتبة الحالية',
'updated': 'تم التحديث',
'Updated By': 'تم تحديثه من طرف',
'updates only': 'تحديثات فقط',
'Upload an image file here.': 'تحميل ملف الصور هنا.',
'Upload an image, such as a photo': 'تحميل صورة ، مثل صورة شمسية',
'Urban area': 'المنطقة الحضرية',
'Urdu': 'أوردو',
'Urgent': 'عاجل',
'Use Geocoder for address lookups?': 'استخدام Geocoder لعمليات البحث عن عنوان؟',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'تستعمل على وضع مؤشرمرشد فوق الرموز في جملة واضحة للتفريق بين الأنواع .',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'تستعمل على وضع مؤشرمرشد فوق الرموز كم أثستخدم المجال الأول في جملة واضحة للتفريق بين السجلات.',
'Used to import data from spreadsheets into the database': 'تستخدم لأخذ بيانات من جداول البيانات إلى قاعدة البيانات',
'User Account has been Disabled': 'تم تعطيل حساب المستخدم',
'User added': 'تمت اضافة المستخدم',
'User deleted': 'تم حذف المستخدم',
'User Management': 'إدارة المستخدمين',
'User updated': 'تم تحديث المستخدم',
'User Updated': 'تم تحديث المستخدم',
'Users': 'المستخدمين',
'Users removed': 'المستخدمين الملغين',
'Value per Pack': 'القيمة لكل حزمة',
'Various Reporting functionalities': 'تعدد الوظائف التقريرية',
'Vehicle': 'مركبة',
'Vehicle Plate Number': 'رقم اللوحة المرورية',
'Vehicle Types': 'أنواع السيارات',
'Verified?': 'تم التحقق منه؟',
'Verify Password': 'التحقق من كلمة السر',
'Version': 'نص',
'Very High': 'عال جدا',
'View and/or update their details': 'عرض و/أو تحديث بياناتهم',
'View Fullscreen Map': ' عرض الخريطة بشاشة كاملة',
'View or update the status of a hospital.': 'عرض أو تحديث حالة المستشفى.',
'View Outbox': 'عرض البريد الصادر',
'View pending requests and pledge support.': 'عرض الطلبات المعلقة و تعهدات الدعم.',
'View Test Result Reports': 'View Test Result Reports',
'View the hospitals on a map.': 'عرض المستشفيات على الخريطة.',
'Village Leader': 'زعيم القرية',
'Visible?': 'مرئي؟',
'Volcanic Ash Cloud': 'سحابة الرماد البركاني',
'Volcanic Event': 'حدث بركاني',
'Volume (m3)': 'الحجم (m3)',
'Volunteer': 'المتطوعين',
'Volunteer added': 'تم اضافة متطوع',
'Volunteer Availability': 'توفر المتطوعين',
'Volunteer availability deleted': 'تم إلغاء توفر المتطوعين',
'Volunteer availability updated': 'تحديث تَوفُرالمتطوعين',
'Volunteer deleted': 'تم حذف المتطوع',
'Volunteer Details': 'تفاصيل المتطوع',
'Volunteer Information': 'معلومات حول المتطوع',
'Volunteer Management': 'إدارة المتطوعين',
'Volunteer Project': 'مشروع التطوع',
'Volunteer Record': 'سجل المتطوع',
'Volunteer Report': 'تقرير المتطوعين',
'Volunteer Role': 'دور المتطوعين',
'Volunteer Role Catalog': 'فهرس دور المتطوعين',
'Volunteer Roles': 'أدوار المتطوعين',
'Volunteer Service Record': 'تسجيل خدمة المتطوع',
'Volunteers': 'المتطوعين',
'Volunteers Report': 'تقارير المتطوعين',
'Volunteers were notified!': 'تم ابلاغ المتطوعين!',
'Votes': 'الأصوات',
'Vulnerability': 'مواطن الضعف',
'Warehouse': 'المستودع',
'Warehouse added': 'أُضيف المستودع',
'Warehouse deleted': 'تم حذف المستودع',
'Warehouse Details': 'تفاصيل المستودع',
'Warehouse Manager': 'مدير المستودع',
'Warehouse Stock': 'المخزون في المستودع',
'Warehouse Stock Expiration Report': 'تقرير أنتهاء المخزون في المستودع',
'Warehouse Stock Report': 'تقرير سند المخزونات',
'Warehouse updated': 'تم تحديث المستودع',
'Warehouses': 'المخازن',
'Water Sanitation Hygiene': 'نظافة مياه الصرف الصحي',
'Watsan Officer': 'موظفي البناء والاصحاح',
'Watsan Technician': 'فني البناء والاصحاح',
'wavy': 'متموج',
'Waybill Number': 'رقم بوليصة الشحن',
'Weather': 'الطقس',
'Weather Stations': 'حالة الطقس',
'Website': 'موقع ويب',
'Weight': 'الوزن',
'Welcome to the Sahana Portal at': 'مرحبا بكم في بوابة ساهانا في',
'When reports were entered': 'متى أدخلت التقارير',
'Whiskers': 'شوارب',
'Who usually collects water for the family?': 'من الذي يجمع عادةالمياه للعائلة؟',
'widowed': 'أرمل',
'Width (m)': 'العرض (م)',
'Wild Fire': 'حريق بري',
'Wind Chill': 'رياح باردة',
'Window frame': 'إطار النافذة',
'Winter Storm': 'عاصفة شتائية',
'within human habitat': 'داخل المستوطنات البشرية',
'Women of Child Bearing Age': 'النساء في سن الإنجاب',
'Wooden poles': 'أعمدة خشبية',
'Work on Program': 'العمل ضمن برنامج',
'Working hours start': 'بدء ساعات العمل',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'وحدة xlwt غير متوفرة في Python - هذا يحتاج الى التثبيت لاخراجات XLS',
'Year': 'السنة',
'Year built': 'سنة البناء',
'Year of Manufacture': 'سنة الانتاج',
'Yellow': 'أصفر',
'yes': 'نعم',
'Yes': 'نعم',
'YES': 'نعم',
'You are currently reported missing!': 'تم الإبلاغ عنكم كمفقودين!',
'You can click on the map below to select the Lat/Lon fields': 'يمكنك النقر على الخريطة أدناه لتحديد حقول خطوط العرض والطول',
'You can select the Draw tool': 'يمكنك اختيارأداة الرسم',
'You can set the modem settings for SMS here.': 'يمكنك ضبط إعدادات المودم للرسائل القصيرة هنا.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'يمكنك استخدام أداة تحويل لتحويل إما من الإحداثيات أو درجة/دقيقة/ثانية.',
'You do not have permission for any facility to make a commitment.': 'ليس لديك إذن لأي منشأة لتقديم التزام.',
'You do not have permission for any facility to make a request.': 'ليس لديك إذن لأي منشأة لتقديم طلب.',
'You do not have permission for any site to receive a shipment.': 'ليس لديك إذن عن أي موقع لتلقي شحنة.',
'You do not have permission to cancel this received shipment.': 'لا يوجد لديك الإذن لإلغاء الشحنة الواردة.',
'You do not have permission to make this commitment.': 'ليس لديك إذن لهذا الالتزام.',
'You do not have permission to send this shipment.': 'ليس لديك الإذن لإرسال هذه الشحنة.',
'You have a personal map configuration. To change your personal configuration, click ': 'لديكم تعديلات الخريطة الشخصية. للتغيير. اضغط',
'You have found a dead body?': 'هل وجدت جثة هامدة؟',
'You must enter a minimum of %d characters': 'يجب عليك %d يجب عليك إدخال ما لا يقل عن',
'You must provide a series id to proceed.': 'يجب توفير سلسلة معرفة للمضي قدما.',
'Your post was added successfully.': 'تمت اضافة النشر الخاص بك بنجاح.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'لقد تم تعيين النظام الخاص بك بتحديد واحد للهوية (UUID) ، مما يمكن أجهزة الكمبيوتر الأخرى التي يمكنك استخدامها بالتعرف عليك. لمعرفة UUID الخاص بك ،يمكنك أن تذهب إلى اللمزامنة --> إضبط المزامنة. يمكنك أن ترى أيضا إعدادات أخرى على هذه الصفحة.',
'Zero Hour': 'ساعة الصفر',
}
| mit |
zpincus/celltool | celltool/numerics/pca.py | 1 | 5436 | # Copyright 2007 Zachary Pincus
# This file is part of CellTool.
#
# CellTool is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
import bisect, numpy
from . import utility_tools
def pca(data):
"""pca(data, axis) -> mean, pcs, norm_pcs, variances, positions, norm_positions
Perform Principal Components Analysis on a set of n-dimensional data points.
The data array must be packed such that 'data[i]' is the ith data point.
This function returns the mean data point, the principal components (packed
such that 'pcs[i]' is the ith principal component), the normalized
principal components (each component is normalized by the data's standard
deviation along that component), the variance each component represents, the
position of each data point along each component, and the position of each
data point along each normalized component."""
data = numpy.asarray(data)
mean = data.mean(axis = 0)
centered = data - mean
flat, data_point_shape = utility_tools.flatten_data(centered)
# could use _flat_pca_svd, but that appears empirically slower...
pcs, variances, stds, positions, norm_positions = _flat_pca_eig(flat)
norm_pcs = utility_tools.fatten_data(pcs * stds[:, numpy.newaxis], data_point_shape)
pcs = utility_tools.fatten_data(pcs, data_point_shape)
return mean, pcs, norm_pcs, variances, positions, norm_positions
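# Example (sketch): typical use of pca() on a stack of data points; the shapes
# and names below are illustrative and not taken from the rest of celltool.
#
#     data = numpy.random.normal(size=(100, 6))   # 100 samples, 6 features each
#     mean, pcs, norm_pcs, variances, positions, norm_positions = pca(data)
#     explained = variances / variances.sum()     # fraction of variance per mode
#     first_mode_scores = positions[:, 0]         # projection of each sample on PC 1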
def _flat_pca_svd(flat):
u, s, vt = numpy.linalg.svd(flat, full_matrices = 0)
pcs = vt
v = numpy.transpose(vt)
data_count = len(flat)
variances = s**2 / data_count
root_data_count = numpy.sqrt(data_count)
stds = s / root_data_count
positions = u * s
norm_positions = u * root_data_count
return pcs, variances, stds, positions, norm_positions
def _flat_pca_eig(flat):
values, vectors = _symm_eig(flat)
pcs = vectors.transpose()
variances = values / len(flat)
stds = numpy.sqrt(variances)
positions = numpy.dot(flat, vectors)
err = numpy.seterr(divide='ignore', invalid='ignore')
norm_positions = positions / stds
numpy.seterr(**err)
norm_positions = numpy.where(numpy.isfinite(norm_positions), norm_positions, 0)
return pcs, variances, stds, positions, norm_positions
def _symm_eig(a):
"""Return the eigenvectors and eigenvalues of the symmetric matrix a'a. If
a has more columns than rows, then that matrix will be rank-deficient,
and the non-zero eigenvalues and eigenvectors can be more easily extracted
from the matrix aa', from the properties of the SVD:
if a of shape (m,n) has SVD u*s*v', then:
a'a = v*s's*v'
aa' = u*ss'*u'
let s_hat, an array of shape (m,n), be such that s * s_hat = I(m,m)
and s_hat * s = I(n,n). Thus, we can solve for u or v in terms of the other:
v = a'*u*s_hat'
u = a*v*s_hat
"""
m, n = a.shape
if m >= n:
# just return the eigenvalues and eigenvectors of a'a
        vals, vecs = _eigh(numpy.dot(a.transpose(), a))
        # clamp tiny negative eigenvalues caused by round-off to zero
        vals = numpy.where(vals < 0, 0, vals)
        return vals, vecs
else:
# figure out the eigenvalues and vectors based on aa', which is smaller
sst_diag, u = _eigh(numpy.dot(a, a.transpose()))
# in case due to numerical instabilities we have sst_diag < 0 anywhere,
# peg them to zero
sst_diag = numpy.where(sst_diag < 0, 0, sst_diag)
# now get the inverse square root of the diagonal, which will form the
# main diagonal of s_hat
err = numpy.seterr(divide='ignore', invalid='ignore')
s_hat_diag = 1/numpy.sqrt(sst_diag)
numpy.seterr(**err)
s_hat_diag = numpy.where(numpy.isfinite(s_hat_diag), s_hat_diag, 0)
# s_hat_diag is a list of length m, a'u is (n,m), so we can just use
# numpy's broadcasting instead of matrix multiplication, and only create
# the upper mxm block of a'u, since that's all we'll use anyway...
v = numpy.dot(a.transpose(), u[:,:m]) * s_hat_diag
return sst_diag, v
def _eigh(m):
values, vectors = numpy.linalg.eigh(m)
order = numpy.flipud(values.argsort())
return values[order], vectors[:,order]
def pca_dimensionality_reduce(data, required_variance_explained):
mean, pcs, norm_pcs, variances, positions, norm_positions = pca(data)
total_variance = numpy.add.accumulate(variances / numpy.sum(variances))
num = bisect.bisect(total_variance, required_variance_explained) + 1
return mean, pcs[:num], norm_pcs[:num], variances[:num], numpy.sum(variances), positions[:,:num], norm_positions[:,:num]
def pca_reconstruct(scores, pcs, mean):
# scores and pcs are indexed along axis zero
flat, data_point_shape = utility_tools.flatten_data(pcs)
return mean + utility_tools.fatten_data(numpy.dot(scores, flat), data_point_shape)
def pca_decompose(data, pcs, mean, variances = None):
flat_pcs, data_point_shape = utility_tools.flatten_data(pcs)
flat_data, data_point_shape = utility_tools.flatten_data(data - mean)
projection = numpy.dot(flat_data, flat_pcs.transpose())
if variances is not None:
normalized_projection = projection / numpy.sqrt(variances)
return projection, normalized_projection
else:
return projection | gpl-2.0 |
Vagab0nd/SiCKRAGE | lib3/feedparser/__init__.py | 1 | 2256 | # Copyright 2010-2020 Kurt McKee <[email protected]>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE."""
from __future__ import absolute_import
from __future__ import unicode_literals
from .api import parse
from .datetimes import registerDateHandler
from .exceptions import *
from .util import FeedParserDict
__author__ = 'Kurt McKee <[email protected]>'
__license__ = 'BSD 2-clause'
__version__ = '6.0.0'
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "feedparser/%s +https://github.com/kurtmckee/feedparser/" % __version__
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
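# Example (sketch): an embedding application would typically override these
# module-level defaults before calling parse(); the URL and agent string below
# are placeholders.
#
#     import feedparser
#     feedparser.USER_AGENT = "MyApp/1.0 +https://example.org/myapp"
#     feedparser.RESOLVE_RELATIVE_URIS = 0
#     d = feedparser.parse("https://example.org/feed.xml")
#     print(d.feed.get("title"), len(d.entries))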
| gpl-3.0 |
yacklebeam/corpus-crawler | extra/gen_list.py | 2 | 1786 | # generate image from csv word list
import sys
import pydot
import ConfigParser
def is_grandparent(parent, child_dict):
for child in child_dict[parent]:
if len(child_dict[child]) > 0:
return True
return False
def draw_node_recursive(parent, child_dict, config):
config.set('TREE', parent, (" ".join(child_dict[parent]).lower()))
for child in child_dict[parent]:
if len(child_dict[child]) > 0:
config.set('TREE', child, (" ".join(child_dict[child]).lower()))
filein = open(sys.argv[1], 'r')
filout = open(sys.argv[2], 'w')
child_dict = {}
phrase_list = []
config = ConfigParser.ConfigParser()
config.add_section("TREE")
line_count = 0
for line in filein:
#if line_count > 10000:
# break
csv_list = line.split(',')
phrase = csv_list[0]
if "[" in phrase or "]" in phrase or "-" in phrase:
continue
phrase_list.append(phrase.split('_'))
line_count += 1
total = len(phrase_list)
count = 0
percen = 0
lastp = 0
for phrase_words in phrase_list:
phrase = "_".join(phrase_words)
count += 1
lastp = percen
percen = 100 * count / total
if percen != lastp:
print (str(100 * count / total) + "%")
child_dict[phrase] = []
for child_words in phrase_list:
#if child_words != phrase_words and all(word in child_words for word in phrase_words):
# child_dict[phrase].append("_".join(child_words))
if len(child_words) > len(phrase_words) and phrase_words == child_words[-1*len(phrase_words):]:
child_dict[phrase].append("_".join(child_words))
for parent in child_dict:
#if "=" not in parent and len(child_dict[parent]) > 1:
# config.set('TREE', parent.upper(), " ".join(child_dict[parent]))
if "=" not in parent and is_grandparent(parent, child_dict):
draw_node_recursive(parent, child_dict, config)
config.write(filout)
filout.close()
filein.close() | mit |
weety/rt-thread | tools/file_check.py | 4 | 9678 | #
# Copyright (c) 2006-2021, RT-Thread Development Team
#
# SPDX-License-Identifier: Apache-2.0
#
# Change Logs:
# Date Author Notes
# 2021-04-01 LiuKang the first version
#
import os
import re
import sys
import click
import yaml
import chardet
import logging
import datetime
def init_logger():
log_format = "[%(filename)s %(lineno)d %(levelname)s] %(message)s "
date_format = '%Y-%m-%d %H:%M:%S %a '
logging.basicConfig(level=logging.INFO,
format=log_format,
datefmt=date_format,
)
class CheckOut:
def __init__(self, rtt_repo, rtt_branch):
self.root = os.getcwd()
self.rtt_repo = rtt_repo
self.rtt_branch = rtt_branch
def __exclude_file(self, file_path):
dir_number = file_path.split('/')
ignore_path = file_path
# gets the file path depth.
for i in dir_number:
# current directory.
dir_name = os.path.dirname(ignore_path)
ignore_path = dir_name
# judge the ignore file exists in the current directory.
ignore_file_path = os.path.join(dir_name, ".ignore_format.yml")
if not os.path.exists(ignore_file_path):
continue
try:
with open(ignore_file_path) as f:
ignore_config = yaml.safe_load(f.read())
file_ignore = ignore_config.get("file_path", [])
dir_ignore = ignore_config.get("dir_path", [])
except Exception as e:
logging.error(e)
continue
logging.debug("ignore file path: {}".format(ignore_file_path))
logging.debug("file_ignore: {}".format(file_ignore))
logging.debug("dir_ignore: {}".format(dir_ignore))
try:
# judge file_path in the ignore file.
for file in file_ignore:
if file is not None:
file_real_path = os.path.join(dir_name, file)
if file_real_path == file_path:
logging.info("ignore file path: {}".format(file_real_path))
return 0
file_dir_path = os.path.dirname(file_path)
for _dir in dir_ignore:
if _dir is not None:
dir_real_path = os.path.join(dir_name, _dir)
if file_dir_path.startswith(dir_real_path):
logging.info("ignore dir path: {}".format(dir_real_path))
return 0
except Exception as e:
logging.error(e)
continue
return 1
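    # Example (sketch) of an .ignore_format.yml consumed above; the entries are
    # placeholders and are resolved relative to the directory holding the file:
    #
    #     file_path:
    #       - docs/generated_api.h
    #     dir_path:
    #       - libcpu/vendor_sdk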
def get_new_file(self):
file_list = list()
try:
os.system('git remote add rtt_repo {}'.format(self.rtt_repo))
os.system('git fetch rtt_repo')
os.system('git merge rtt_repo/{}'.format(self.rtt_branch))
os.system('git reset rtt_repo/{} --soft'.format(self.rtt_branch))
os.system('git status > git.txt')
except Exception as e:
logging.error(e)
return None
try:
with open('git.txt', 'r') as f:
file_lines = f.readlines()
except Exception as e:
logging.error(e)
return None
file_path = ''
for line in file_lines:
if 'new file' in line:
file_path = line.split('new file:')[1].strip()
logging.info('new file -> {}'.format(file_path))
elif 'deleted' in line:
logging.info('deleted file -> {}'.format(line.split('deleted:')[1].strip()))
elif 'modified' in line:
file_path = line.split('modified:')[1].strip()
logging.info('modified file -> {}'.format(file_path))
else:
continue
result = self.__exclude_file(file_path)
if result != 0:
file_list.append(file_path)
return file_list
class FormatCheck:
def __init__(self, file_list):
self.file_list = file_list
def __check_file(self, file_lines, file_path):
line_num = 1
check_result = True
for line in file_lines:
# check line start
line_start = line.replace(' ', '')
# find tab
if line_start.startswith('\t'):
logging.error("{} line[{}]: please use space replace tab at the start of this line.".format(file_path, line_num))
check_result = False
# check line end
lin_end = line.split('\n')[0]
if lin_end.endswith(' ') or lin_end.endswith('\t'):
logging.error("{} line[{}]: please delete extra space at the end of this line.".format(file_path, line_num))
check_result = False
line_num += 1
return check_result
def check(self):
logging.info("Start to check files format.")
if len(self.file_list) == 0:
logging.warning("There are no files to check format.")
return True
encoding_check_result = True
format_check_fail_files = 0
for file_path in self.file_list:
code = ''
if file_path.endswith(".c") or file_path.endswith(".h"):
try:
with open(file_path, 'rb') as f:
file = f.read()
# get file encoding
code = chardet.detect(file)['encoding']
except Exception as e:
logging.error(e)
else:
continue
if code != 'utf-8' and code != 'ascii':
logging.error("[{0}]: encoding not utf-8, please format it.".format(file_path))
encoding_check_result = False
else:
logging.info('[{0}]: encoding check success.'.format(file_path))
with open(file_path, 'r', encoding = "utf-8") as f:
file_lines = f.readlines()
if not self.__check_file(file_lines, file_path):
format_check_fail_files += 1
if (not encoding_check_result) or (format_check_fail_files != 0):
logging.error("files format check fail.")
return False
logging.info("files format check success.")
return True
class LicenseCheck:
def __init__(self, file_list):
self.file_list = file_list
def check(self):
current_year = datetime.date.today().year
logging.info("current year: {}".format(current_year))
if len(self.file_list) == 0:
logging.warning("There are no files to check license.")
return 0
logging.info("Start to check files license.")
check_result = True
for file_path in self.file_list:
if file_path.endswith(".c") or file_path.endswith(".h"):
try:
with open(file_path, 'r') as f:
file = f.readlines()
except Exception as e:
logging.error(e)
else:
continue
if 'Copyright' in file[1] and 'SPDX-License-Identifier: Apache-2.0' in file[3]:
try:
license_year = re.search(r'2006-\d{4}', file[1]).group()
true_year = '2006-{}'.format(current_year)
if license_year != true_year:
logging.warning("[{0}]: license year: {} is not true: {}, please update.".format(file_path,
license_year,
true_year))
else:
logging.info("[{0}]: license check success.".format(file_path))
except Exception as e:
logging.error(e)
else:
logging.error("[{0}]: license check fail.".format(file_path))
check_result = False
return check_result
@click.group()
@click.pass_context
def cli(ctx):
pass
@cli.command()
@click.option(
'--license',
"check_license",
required=False,
type=click.BOOL,
flag_value=True,
help="Enable File license check.",
)
@click.argument(
'repo',
nargs=1,
type=click.STRING,
default='https://github.com/RT-Thread/rt-thread',
)
@click.argument(
'branch',
nargs=1,
type=click.STRING,
default='master',
)
def check(check_license, repo, branch):
"""
check files license and format.
"""
init_logger()
# get modified files list
checkout = CheckOut(repo, branch)
file_list = checkout.get_new_file()
if file_list is None:
logging.error("checkout files fail")
sys.exit(1)
# check modified files format
format_check = FormatCheck(file_list)
format_check_result = format_check.check()
license_check_result = True
if check_license:
license_check = LicenseCheck(file_list)
license_check_result = license_check.check()
if not format_check_result or not license_check_result:
logging.error("file format check or license check fail.")
sys.exit(1)
logging.info("check success.")
sys.exit(0)
if __name__ == '__main__':
cli()
| apache-2.0 |
HeikeHoffm2/Heike | py/openage/convert/hardcoded/langcodes.py | 46 | 8618 | # language codes, as used in PE file ressources
# this file is used by pefile.py
langcodes = {
1: 'ar',
2: 'bg',
3: 'ca',
4: 'zh_Hans',
5: 'cs',
6: 'da',
7: 'de',
8: 'el',
9: 'en',
10: 'es',
11: 'fi',
12: 'fr',
13: 'he',
14: 'hu',
15: 'is',
16: 'it',
17: 'ja',
18: 'ko',
19: 'nl',
20: 'no',
21: 'pl',
22: 'pt',
23: 'rm',
24: 'ro',
25: 'ru',
26: 'bs',
27: 'sk',
28: 'sq',
29: 'sv',
30: 'th',
31: 'tr',
32: 'ur',
33: 'id',
34: 'uk',
35: 'be',
36: 'sl',
37: 'et',
38: 'lv',
39: 'lt',
40: 'tg',
41: 'fa',
42: 'vi',
43: 'hy',
44: 'az',
45: 'eu',
46: 'dsb',
47: 'mk',
48: 'st',
49: 'ts',
50: 'tn',
51: 've',
52: 'xh',
53: 'zu',
54: 'af',
55: 'ka',
56: 'fo',
57: 'hi',
58: 'mt',
59: 'se',
60: 'ga',
61: 'yi',
62: 'ms',
63: 'kk',
64: 'ky',
65: 'sw',
66: 'tk',
67: 'uz',
68: 'tt',
69: 'bn',
70: 'pa',
71: 'gu',
72: 'or',
73: 'ta',
74: 'te',
75: 'kn',
76: 'ml',
77: 'as',
78: 'mr',
79: 'sa',
80: 'mn',
81: 'bo',
82: 'cy',
83: 'km',
84: 'lo',
85: 'my',
86: 'gl',
87: 'kok',
88: 'mni',
89: 'sd',
90: 'syr',
91: 'si',
92: 'chr',
93: 'iu',
94: 'am',
95: 'tzm',
96: 'ks',
97: 'ne',
98: 'fy',
99: 'ps',
100: 'fil',
101: 'dv',
102: 'bin',
103: 'ff',
104: 'ha',
105: 'ibb',
106: 'yo',
107: 'quz',
108: 'nso',
109: 'ba',
110: 'lb',
111: 'kl',
112: 'ig',
113: 'kr',
114: 'om',
115: 'ti',
116: 'gn',
117: 'haw',
118: 'la',
119: 'so',
120: 'ii',
121: 'pap',
122: 'arn',
124: 'moh',
126: 'br',
128: 'ug',
129: 'mi',
130: 'oc',
131: 'co',
132: 'gsw',
133: 'sah',
134: 'qut',
135: 'rw',
136: 'wo',
140: 'prs',
145: 'gd',
146: 'ku',
1025: 'ar_SA',
1026: 'bg_BG',
1027: 'ca_ES',
1028: 'zh_TW',
1029: 'cs_CZ',
1030: 'da_DK',
1031: 'de_DE',
1032: 'el_GR',
1033: 'en_US',
1034: 'es_ES_tradnl',
1035: 'fi_FI',
1036: 'fr_FR',
1037: 'he_IL',
1038: 'hu_HU',
1039: 'is_IS',
1040: 'it_IT',
1041: 'ja_JP',
1042: 'ko_KR',
1043: 'nl_NL',
1044: 'nb_NO',
1045: 'pl_PL',
1046: 'pt_BR',
1047: 'rm_CH',
1048: 'ro_RO',
1049: 'ru_RU',
1050: 'hr_HR',
1051: 'sk_SK',
1052: 'sq_AL',
1053: 'sv_SE',
1054: 'th_TH',
1055: 'tr_TR',
1056: 'ur_PK',
1057: 'id_ID',
1058: 'uk_UA',
1059: 'be_BY',
1060: 'sl_SI',
1061: 'et_EE',
1062: 'lv_LV',
1063: 'lt_LT',
1064: 'tg_Cyrl_TJ',
1065: 'fa_IR',
1066: 'vi_VN',
1067: 'hy_AM',
1068: 'az_Latn_AZ',
1069: 'eu_ES',
1070: 'hsb_DE',
1071: 'mk_MK',
1072: 'st_ZA',
1073: 'ts_ZA',
1074: 'tn_ZA',
1075: 've_ZA',
1076: 'xh_ZA',
1077: 'zu_ZA',
1078: 'af_ZA',
1079: 'ka_GE',
1080: 'fo_FO',
1081: 'hi_IN',
1082: 'mt_MT',
1083: 'se_NO',
1085: 'yi_Hebr',
1086: 'ms_MY',
1087: 'kk_KZ',
1088: 'ky_KG',
1089: 'sw_KE',
1090: 'tk_TM',
1091: 'uz_Latn_UZ',
1092: 'tt_RU',
1093: 'bn_IN',
1094: 'pa_IN',
1095: 'gu_IN',
1096: 'or_IN',
1097: 'ta_IN',
1098: 'te_IN',
1099: 'kn_IN',
1100: 'ml_IN',
1101: 'as_IN',
1102: 'mr_IN',
1103: 'sa_IN',
1104: 'mn_MN',
1105: 'bo_CN',
1106: 'cy_GB',
1107: 'km_KH',
1108: 'lo_LA',
1109: 'my_MM',
1110: 'gl_ES',
1111: 'kok_IN',
1112: 'mni_IN',
1113: 'sd_Deva_IN',
1114: 'syr_SY',
1115: 'si_LK',
1116: 'chr_Cher_US',
1117: 'iu_Cans_CA',
1118: 'am_ET',
1119: 'tzm_Arab_MA',
1120: 'ks_Arab',
1121: 'ne_NP',
1122: 'fy_NL',
1123: 'ps_AF',
1124: 'fil_PH',
1125: 'dv_MV',
1126: 'bin_NG',
1127: 'fuv_NG',
1128: 'ha_Latn_NG',
1129: 'ibb_NG',
1130: 'yo_NG',
1131: 'quz_BO',
1132: 'nso_ZA',
1133: 'ba_RU',
1134: 'lb_LU',
1135: 'kl_GL',
1136: 'ig_NG',
1137: 'kr_NG',
1138: 'om_ET',
1139: 'ti_ET',
1140: 'gn_PY',
1141: 'haw_US',
1142: 'la_Latn',
1143: 'so_SO',
1144: 'ii_CN',
1145: 'pap_029',
1146: 'arn_CL',
1148: 'moh_CA',
1150: 'br_FR',
1152: 'ug_CN',
1153: 'mi_NZ',
1154: 'oc_FR',
1155: 'co_FR',
1156: 'gsw_FR',
1157: 'sah_RU',
1158: 'qut_GT',
1159: 'rw_RW',
1160: 'wo_SN',
1164: 'prs_AF',
1165: 'plt_MG',
1166: 'zh_yue_HK',
1167: 'tdd_Tale_CN',
1168: 'khb_Talu_CN',
1169: 'gd_GB',
1170: 'ku_Arab_IQ',
1171: 'quc_CO',
1281: 'qps_ploc',
1534: 'qps_ploca',
2049: 'ar_IQ',
2051: 'ca_ES_valencia',
2052: 'zh_CN',
2055: 'de_CH',
2057: 'en_GB',
2058: 'es_MX',
2060: 'fr_BE',
2064: 'it_CH',
2065: 'ja_Ploc_JP',
2067: 'nl_BE',
2068: 'nn_NO',
2070: 'pt_PT',
2072: 'ro_MD',
2073: 'ru_MD',
2074: 'sr_Latn_CS',
2077: 'sv_FI',
2080: 'ur_IN',
2092: 'az_Cyrl_AZ',
2094: 'dsb_DE',
2098: 'tn_BW',
2107: 'se_SE',
2108: 'ga_IE',
2110: 'ms_BN',
2115: 'uz_Cyrl_UZ',
2117: 'bn_BD',
2118: 'pa_Arab_PK',
2121: 'ta_LK',
2128: 'mn_Mong_CN',
2129: 'bo_BT',
2137: 'sd_Arab_PK',
2141: 'iu_Latn_CA',
2143: 'tzm_Latn_DZ',
2144: 'ks_Deva',
2145: 'ne_IN',
2151: 'ff_Latn_SN',
2155: 'quz_EC',
2163: 'ti_ER',
2559: 'qps_plocm',
3073: 'ar_EG',
3076: 'zh_HK',
3079: 'de_AT',
3081: 'en_AU',
3082: 'es_ES',
3084: 'fr_CA',
3098: 'sr_Cyrl_CS',
3131: 'se_FI',
3152: 'mn_Mong_MN',
3167: 'tmz_MA',
3179: 'quz_PE',
4097: 'ar_LY',
4100: 'zh_SG',
4103: 'de_LU',
4105: 'en_CA',
4106: 'es_GT',
4108: 'fr_CH',
4122: 'hr_BA',
4155: 'smj_NO',
4191: 'tzm_Tfng_MA',
5121: 'ar_DZ',
5124: 'zh_MO',
5127: 'de_LI',
5129: 'en_NZ',
5130: 'es_CR',
5132: 'fr_LU',
5146: 'bs_Latn_BA',
5179: 'smj_SE',
6145: 'ar_MA',
6153: 'en_IE',
6154: 'es_PA',
6156: 'fr_MC',
6170: 'sr_Latn_BA',
6203: 'sma_NO',
7169: 'ar_TN',
7177: 'en_ZA',
7178: 'es_DO',
7194: 'sr_Cyrl_BA',
7227: 'sma_SE',
8193: 'ar_OM',
8201: 'en_JM',
8202: 'es_VE',
8204: 'fr_RE',
8218: 'bs_Cyrl_BA',
8251: 'sms_FI',
9217: 'ar_YE',
9225: 'en_029',
9226: 'es_CO',
9228: 'fr_CD',
9242: 'sr_Latn_RS',
9275: 'smn_FI',
10241: 'ar_SY',
10249: 'en_BZ',
10250: 'es_PE',
10252: 'fr_SN',
10266: 'sr_Cyrl_RS',
11265: 'ar_JO',
11273: 'en_TT',
11274: 'es_AR',
11276: 'fr_CM',
11290: 'sr_Latn_ME',
12289: 'ar_LB',
12297: 'en_ZW',
12298: 'es_EC',
12300: 'fr_CI',
12314: 'sr_Cyrl_ME',
13313: 'ar_KW',
13321: 'en_PH',
13322: 'es_CL',
13324: 'fr_ML',
14337: 'ar_AE',
14345: 'en_ID',
14346: 'es_UY',
14348: 'fr_MA',
15361: 'ar_BH',
15369: 'en_HK',
15370: 'es_PY',
15372: 'fr_HT',
16385: 'ar_QA',
16393: 'en_IN',
16394: 'es_BO',
17409: 'ar_Ploc_SA',
17417: 'en_MY',
17418: 'es_SV',
18433: 'ar_145',
18441: 'en_SG',
18442: 'es_HN',
19465: 'en_AE',
19466: 'es_NI',
20489: 'en_BH',
20490: 'es_PR',
21513: 'en_EG',
21514: 'es_US',
22537: 'en_JO',
22538: 'es_419',
23561: 'en_KW',
24585: 'en_TR',
25609: 'en_YE',
25626: 'bs_Cyrl',
26650: 'bs_Latn',
27674: 'sr_Cyrl',
28698: 'sr_Latn',
28731: 'smn',
29740: 'az_Cyrl',
29755: 'sms',
30724: 'zh',
30740: 'nn',
30746: 'bs',
30764: 'az_Latn',
30779: 'sma',
30787: 'uz_Cyrl',
30800: 'mn_Cyrl',
30813: 'iu_Cans',
30815: 'tzm_Tfng',
31748: 'zh_Hant',
31764: 'nb',
31770: 'sr',
31784: 'tg_Cyrl',
31790: 'dsb',
31803: 'smj',
31811: 'uz_Latn',
31814: 'pa_Arab',
31824: 'mn_Mong',
31833: 'sd_Arab',
31836: 'chr_Cher',
31837: 'iu_Latn',
31839: 'tzm_Latn',
31847: 'ff_Latn',
31848: 'ha_Latn',
31890: 'ku_Arab',
65663: 'x_IV_mathan',
66567: 'de_DE_phoneb',
66574: 'hu_HU_tchncl',
66615: 'ka_GE_modern',
133124: 'zh_CN_stroke',
135172: 'zh_SG_stroke',
136196: 'zh_MO_stroke',
197636: 'zh_TW_pronun',
263172: 'zh_TW_radstr',
263185: 'ja_JP_radstr',
265220: 'zh_HK_radstr',
267268: 'zh_MO_radstr'}
| gpl-3.0 |
chris-statzer/knuckle-python | contrib/knuckle/window.py | 1 | 3176 | import logging
import time
import sdl2
logging.basicConfig(level=logging.INFO)
class Window(sdl2.Window):
def __init__(self, width, height, title):
super(Window, self).__init__(width, height, title)
        self.log = logging.getLogger('Window')
self.log.info('++ Creating new window object ({},{})'.format(width,
height))
self.width = width
self.height = height
self.running = True
# Stack stuff
self.last_state = None
self.state_stack = []
# Time stuff
self.current_time = 0
def push_state(self, state_cls, *args, **kwargs):
state = state_cls(self, state_cls.name, *args, **kwargs)
ss_len = len(self.state_stack)
if ss_len > 0:
top_state = self.state_stack[ss_len-1]
top_state.on_lose_focus()
self.log.info('>> Pushing: {}'.format(state.name))
state.on_post_init()
self.state_stack.append(state)
def pop_state(self, state=None):
# Check to selectively remove a state from the stack rather than pop it
if state is not None:
self.state_stack.remove(state)
self.log.info('<< Popped: {}'.format(state.name))
return state
# Call the change in focus
popped_state = self.state_stack.pop()
popped_state.on_lose_focus()
self.log.info('<< Popped: {}'.format(popped_state.name))
return popped_state
def quit(self):
exit()
def run(self):
while self.running:
# calc delta time
ct = self.get_ticks()
dt = ct - self.current_time
self.current_time = ct
# Check if state stack is empty
if len(self.state_stack) == 0:
self.running = False
break
ss_len = len(self.state_stack)
top_state = self.state_stack[ss_len-1]
# Check to see if the top state has changed and call focus
if top_state is not self.last_state:
self.last_state = top_state
top_state.on_gain_focus()
# Send events to or state
events = self.poll_events()
ac = top_state.active_controller
for e in events:
t = e['type']
if t == 'keydown':
ke = ac.keymap.key_to_event(e['key'])
top_state.eb.publish(ke)
elif t == 'keyup':
ke = ac.keymap.key_to_event(e['key'])
# top_state.eb.publish(ke)
elif t == 'quit':
top_state.on_quit()
elif t == 'padbuttondown':
be = inputconfig.button_to_event(e['button'])
top_state.eb.publish(be)
elif t == 'padbuttonup':
be = inputconfig.button_to_event(e['button'])
top_state.eb.publish(be)
# calc delta time and call update
top_state.sched.update(dt)
# Call draw
top_state.on_draw()
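# Example (sketch): the intended driving pattern; TitleState stands in for an
# application-defined state class providing the hooks used in run() above
# (on_post_init, on_gain_focus, on_draw, sched, eb, active_controller, ...).
#
#     win = Window(640, 480, "demo")
#     win.push_state(TitleState)
#     win.run()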
| mit |
iuliat/nova | nova/scheduler/filters/utils.py | 41 | 3282 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bench of utility methods used by filters."""
import collections
from oslo_log import log as logging
import six
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
def aggregate_values_from_key(host_state, key_name):
"""Returns a set of values based on a metadata key for a specific host."""
aggrlist = host_state.aggregates
return {aggr.metadata[key_name]
for aggr in aggrlist
if key_name in aggr.metadata
}
def aggregate_metadata_get_by_host(host_state, key=None):
"""Returns a dict of all metadata based on a metadata key for a specific
host. If the key is not provided, returns a dict of all metadata.
"""
aggrlist = host_state.aggregates
metadata = collections.defaultdict(set)
for aggr in aggrlist:
if key is None or key in aggr.metadata:
for k, v in aggr.metadata.items():
metadata[k].update(x.strip() for x in v.split(','))
return metadata
def validate_num_values(vals, default=None, cast_to=int, based_on=min):
"""Returns a correctly casted value based on a set of values.
This method is useful to work with per-aggregate filters, It takes
a set of values then return the 'based_on'{min/max} converted to
'cast_to' of the set or the default value.
Note: The cast implies a possible ValueError
"""
num_values = len(vals)
if num_values == 0:
return default
if num_values > 1:
LOG.info(_LI("%(num_values)d values found, "
"of which the minimum value will be used."),
{'num_values': num_values})
return based_on([cast_to(val) for val in vals])
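# Example (sketch): with per-aggregate metadata values such as {'4', '6'} the
# helper returns the smallest value cast to int; the inputs shown are
# illustrative only.
#
#     validate_num_values({'4', '6'}, default=8, cast_to=int, based_on=min)
#     # -> 4 (also logs that more than one value was found)
#     validate_num_values(set(), default=8)
#     # -> 8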
def instance_uuids_overlap(host_state, uuids):
"""Tests for overlap between a host_state and a list of uuids.
Returns True if any of the supplied uuids match any of the instance.uuid
values in the host_state.
"""
if isinstance(uuids, six.string_types):
uuids = [uuids]
set_uuids = set(uuids)
# host_state.instances is a dict whose keys are the instance uuids
host_uuids = set(host_state.instances.keys())
return bool(host_uuids.intersection(set_uuids))
def other_types_on_host(host_state, instance_type_id):
"""Tests for overlap between a host_state's instances and an
instance_type_id.
Returns True if there are any instances in the host_state whose
instance_type_id is different than the supplied instance_type_id value.
"""
host_instances = host_state.instances.values()
host_types = set([inst.instance_type_id for inst in host_instances])
inst_set = set([instance_type_id])
return bool(host_types - inst_set)
| apache-2.0 |
emilk/sproxel | distro/common/lib/UserDict.py | 83 | 5991 | """A more or less complete user-defined wrapper around dictionary objects."""
class UserDict:
def __init__(self, dict=None, **kwargs):
self.data = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
def __repr__(self): return repr(self.data)
def __cmp__(self, dict):
if isinstance(dict, UserDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
__hash__ = None # Avoid Py3k warning
def __len__(self): return len(self.data)
def __getitem__(self, key):
if key in self.data:
return self.data[key]
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, key)
raise KeyError(key)
def __setitem__(self, key, item): self.data[key] = item
def __delitem__(self, key): del self.data[key]
def clear(self): self.data.clear()
def copy(self):
if self.__class__ is UserDict:
return UserDict(self.data.copy())
import copy
data = self.data
try:
self.data = {}
c = copy.copy(self)
finally:
self.data = data
c.update(self)
return c
def keys(self): return self.data.keys()
def items(self): return self.data.items()
def iteritems(self): return self.data.iteritems()
def iterkeys(self): return self.data.iterkeys()
def itervalues(self): return self.data.itervalues()
def values(self): return self.data.values()
def has_key(self, key): return key in self.data
def update(self, dict=None, **kwargs):
if dict is None:
pass
elif isinstance(dict, UserDict):
self.data.update(dict.data)
elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
self.data.update(dict)
else:
for k, v in dict.items():
self[k] = v
if len(kwargs):
self.data.update(kwargs)
def get(self, key, failobj=None):
if key not in self:
return failobj
return self[key]
def setdefault(self, key, failobj=None):
if key not in self:
self[key] = failobj
return self[key]
def pop(self, key, *args):
return self.data.pop(key, *args)
def popitem(self):
return self.data.popitem()
def __contains__(self, key):
return key in self.data
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
class IterableUserDict(UserDict):
def __iter__(self):
return iter(self.data)
import _abcoll
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin:
# Mixin defining all dictionary methods for classes that already have
# a minimum dictionary interface including getitem, setitem, delitem,
# and keys. Without knowledge of the subclass constructor, the mixin
# does not define __init__() or copy(). In addition to the four base
# methods, progressively more efficiency comes with defining
# __contains__(), __iter__(), and iteritems().
# second level definitions support higher levels
def __iter__(self):
for k in self.keys():
yield k
def has_key(self, key):
try:
self[key]
except KeyError:
return False
return True
def __contains__(self, key):
return self.has_key(key)
# third level takes advantage of second level definitions
def iteritems(self):
for k in self:
yield (k, self[k])
def iterkeys(self):
return self.__iter__()
# fourth level uses definitions from lower levels
def itervalues(self):
for _, v in self.iteritems():
yield v
def values(self):
return [v for _, v in self.iteritems()]
def items(self):
return list(self.iteritems())
def clear(self):
for key in self.keys():
del self[key]
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def pop(self, key, *args):
if len(args) > 1:
raise TypeError, "pop expected at most 2 arguments, got "\
+ repr(1 + len(args))
try:
value = self[key]
except KeyError:
if args:
return args[0]
raise
del self[key]
return value
def popitem(self):
try:
k, v = self.iteritems().next()
except StopIteration:
raise KeyError, 'container is empty'
del self[k]
return (k, v)
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
for k, v in other.iteritems():
self[k] = v
elif hasattr(other, 'keys'):
for k in other.keys():
self[k] = other[k]
else:
for k, v in other:
self[k] = v
if kwargs:
self.update(kwargs)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __repr__(self):
return repr(dict(self.iteritems()))
def __cmp__(self, other):
if other is None:
return 1
if isinstance(other, DictMixin):
other = dict(other.iteritems())
return cmp(dict(self.iteritems()), other)
def __len__(self):
return len(self.keys())
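# Example (sketch): a minimal mapping built on DictMixin only needs to provide
# __getitem__, __setitem__, __delitem__ and keys(); the rest is derived from
# the mixin above. SeqDict is illustrative, not part of this module.
#
#     class SeqDict(DictMixin):
#         def __init__(self):
#             self._keys, self._values = [], []
#         def __getitem__(self, key):
#             try:
#                 return self._values[self._keys.index(key)]
#             except ValueError:
#                 raise KeyError(key)
#         def __setitem__(self, key, value):
#             if key in self._keys:
#                 self._values[self._keys.index(key)] = value
#             else:
#                 self._keys.append(key)
#                 self._values.append(value)
#         def __delitem__(self, key):
#             i = self._keys.index(key)
#             del self._keys[i]
#             del self._values[i]
#         def keys(self):
#             return list(self._keys)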
| bsd-3-clause |
ufal/neuralmonkey | neuralmonkey/tf_utils.py | 1 | 7163 | """A set of helper functions for TensorFlow."""
from typing import Callable, Iterable, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from neuralmonkey.logging import debug, debug_enabled
# pylint: disable=invalid-name
ShapeSpec = List[int]
# pylint: enable=invalid-name
def _get_current_experiment():
# This is needed to avoid circular imports.
from neuralmonkey.experiment import Experiment
return Experiment.get_current()
def update_initializers(initializers: Iterable[Tuple[str, Callable]]) -> None:
_get_current_experiment().update_initializers(initializers)
def get_initializer(var_name: str,
default: Callable = None) -> Optional[Callable]:
"""Return the initializer associated with the given variable name.
The name of the current variable scope is prepended to the variable name.
This should only be called during model building.
"""
full_name = tf.get_variable_scope().name + "/" + var_name
return _get_current_experiment().get_initializer(full_name, default)
def get_variable(name: str,
shape: ShapeSpec = None,
dtype: tf.DType = None,
initializer: Callable = None,
**kwargs) -> tf.Variable:
"""Get an existing variable with these parameters or create a new one.
This is a wrapper around `tf.get_variable`. The `initializer` parameter is
treated as a default which can be overriden by a call to
`update_initializers`.
This should only be called during model building.
"""
return tf.get_variable(
name=name, shape=shape, dtype=dtype,
initializer=get_initializer(name, initializer),
**kwargs)
def get_shape_list(x: tf.Tensor) -> List[Union[int, tf.Tensor]]:
"""Return list of dims, statically where possible.
Compute the static shape of a tensor. Where the dimension is not static
(e.g. batch or time dimension), symbolic Tensor is returned.
Based on tensor2tensor.
Arguments:
x: The ``Tensor`` to process.
Returns:
A list of integers and Tensors.
"""
x = tf.convert_to_tensor(x)
# If unknown rank, return dynamic shape
if x.get_shape().dims is None:
return tf.shape(x)
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i, dim in enumerate(static):
if dim is None:
dim = shape[i]
ret.append(dim)
return ret
def get_state_shape_invariants(state: tf.Tensor) -> tf.TensorShape:
"""Return the shape invariant of a tensor.
This function computes the loosened shape invariant of a state tensor.
Only invariant dimension is the state size dimension, which is the last.
Based on tensor2tensor.
Arguments:
state: The state tensor.
Returns:
A ``TensorShape`` object with all but the last dimensions set to
``None``.
"""
shape = state.shape.as_list()
for i in range(0, len(shape) - 1):
shape[i] = None
return tf.TensorShape(shape)
def gather_flat(x: tf.Tensor,
indices: tf.Tensor,
batch_size: Union[int, tf.Tensor] = 1,
beam_size: Union[int, tf.Tensor] = 1) -> tf.Tensor:
"""Gather values from the flattened (shape=[batch * beam, ...]) input.
This function expects a flattened tensor with first dimension of size
*batch x beam* elements. Using the given batch and beam size, it reshapes
the input tensor to a tensor of shape ``(batch, beam, ...)`` and gather
the values from it using the index tensor.
Arguments:
x: A flattened ``Tensor`` from which to gather values.
indices: Index tensor.
batch_size: The size of the batch.
beam_size: The size of the beam.
Returns:
The ``Tensor`` of gathered values.
"""
if x.shape.ndims == 0:
return x
shape = [batch_size, beam_size] + get_shape_list(x)[1:]
gathered = tf.gather_nd(tf.reshape(x, shape), indices)
return tf.reshape(gathered, [-1] + shape[2:])
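# Example (sketch): re-ordering flattened decoder states after a beam search
# step. Shapes are illustrative; ``indices`` holds (batch index, beam index)
# pairs with shape (batch, beam, 2), as expected by tf.gather_nd above.
#
#     flat_states = tf.zeros([batch * beam, rnn_size])
#     reordered = gather_flat(flat_states, indices, batch, beam)
#     # reordered is again [batch * beam, rnn_size], rows picked per beam entry.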
def partial_transpose(x: tf.Tensor, indices: List[int]) -> tf.Tensor:
"""Do a transpose on a subset of tensor dimensions.
Compute a permutation of first k dimensions of a tensor.
Arguments:
x: The ``Tensor`` to transpose.
indices: The permutation of the first k dimensions of ``x``.
Returns:
The transposed tensor.
"""
dims = x.shape.ndims
orig_indices = list(range(dims))
return tf.transpose(x, indices + orig_indices[len(indices):])
def tf_print(tensor: tf.Tensor,
message: str = None,
debug_label: str = None) -> tf.Tensor:
"""Print the value of a tensor to the debug log.
Better than tf.Print, logs to console only when the "tensorval" debug
subject is turned on.
Idea found at: https://stackoverflow.com/a/39649614
Args:
tensor: The tensor whose value to print
Returns:
As tf.Print, this function returns a tensor identical to the input
tensor, with the printing side-effect added.
"""
def print_tensor(x: np.ndarray) -> tf.Tensor:
if message is not None:
debug(
"{}, shape: {}:\n{}".format(message, x.shape, x), debug_label)
else:
debug("Shape: {}\n{}".format(x.shape, x), debug_label)
return x
# To save time, check if debug will print something
if not debug_enabled(debug_label):
return tensor
log_op = tf.py_func(print_tensor, [tensor], [tensor.dtype])[0]
with tf.control_dependencies([log_op]):
res = tf.identity(tensor)
return res
def layer_norm(x: tf.Tensor, epsilon: float = 1e-6) -> tf.Tensor:
"""Layer normalize the tensor x, averaging over the last dimension.
Implementation based on tensor2tensor.
Arguments:
x: The ``Tensor`` to normalize.
epsilon: The smoothing parameter of the normalization.
Returns:
The normalized tensor.
"""
with tf.variable_scope("LayerNorm"):
gamma = get_variable(
name="gamma",
shape=[x.get_shape()[-1]],
dtype=tf.float32,
initializer=tf.ones_initializer())
beta = get_variable(
name="beta",
shape=[x.get_shape()[-1]],
dtype=tf.float32,
initializer=tf.zeros_initializer())
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(
tf.square(x - mean),
axis=[-1],
keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return norm_x * gamma + beta
def append_tensor(tensor: tf.Tensor,
appendval: tf.Tensor,
axis: int = 0) -> tf.Tensor:
"""Append an ``N``-D Tensor to an ``(N+1)``-D Tensor.
Arguments:
tensor: The original Tensor
appendval: The Tensor to add
axis: Which axis should we use
Returns:
An ``(N+1)``-D Tensor with ``appendval`` on the last position.
"""
return tf.concat([tensor, tf.expand_dims(appendval, axis)], axis)
| bsd-3-clause |
ShujiaHuang/AsmVar | src/AsmvarGenotype/GMM/GMM2D.py | 2 | 18363 | """
================================================
My own Gaussian Mixture Model for SV genotyping.
Learn form scikit-learn
================================================
Author : Shujia Huang
Date : 2014-01-06 14:33:45
"""
import sys
import numpy as np
from scipy import linalg
from sklearn import cluster
from sklearn.base import BaseEstimator
from sklearn.utils.extmath import logsumexp
EPS = np.finfo(float).eps
class GMM ( BaseEstimator ) :
"""
Copy from scikit-learn
"""
def __init__(self, n_components=1, covariance_type='diag', random_state=None, thresh=1e-2, min_covar=1e-3,
n_iter=100, n_init=10, params='wmc', init_params='wmc'):
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.init_means = []
self.init_covars = []
self.category = [] # For genotype
if not covariance_type in ['spherical', 'tied', 'diag', 'full']:
raise ValueError( 'Invalid value for covariance_type: %s' % covariance_type )
if n_init < 1: raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def predict(self, X):
"""
Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""
Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def fit(self, X):
"""
Copy form scikit-learn: gmm.py
Estimate model parameters with the expectation-maximization
algorithm.
A initialization step is performed before entering the em
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
X = np.asarray(X, dtype=np.float)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
lowest_bias = np.infty
c1,c2,c3 = '1/1', '0/1', '0/0'
m1,m2,m3 = 0.001 , 0.5 , 1.0
v1,v2,v3 = 0.002, 0.002, 0.002
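# Candidate mixture configurations: the full three-genotype mixture plus every
# two- and one-component subset. Component means near 0.001, 0.5 and 1.0 stand
# for the genotype categories '1/1', '0/1' and '0/0' respectively, each with a
# small fixed initial variance.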
category = np.array([ [c1,c2,c3],
[c1,c2], [c1,c3], [c2,c3] ,
[c1] , [c2] , [c3] ])
init_means = np.array([ [[ m1],[ m2] , [ m3]],
[[ m1],[ m2]], [[m1],[m3]], [[m2],[m3]],
[[m1]] , [[m2]] , [[m3]] ])
init_covars = np.array([ [[[ v1]],[[ v2]],[[ v3]]],
[[[ v1]],[[ v2]]], [[[ v1]],[[ v3]]], [[[ v2]],[[ v3]]],
[[[ v1]]] , [[[ v2]]] , [[[ v3]]] ])
bestCovars, bestMeans, bestWeights, bestConverged, bestCategory = [], [], [], [], []
for i, (m,v,c) in enumerate( zip(init_means, init_covars, category) ) :
if i == 0 and self.n_components != 3 : continue
if i < 4 and self.n_components == 1 : continue
self.init_means = np.array(m)
self.init_covars = np.array(v)
self.category = np.array(c)
best_params,bias = self.training(X)
if lowest_bias > bias :
lowest_bias = bias
bestCovars = best_params['covars']
bestMeans = best_params['means']
bestWeights = best_params['weights']
bestConverged = best_params['converged']
bestCategory = best_params['category']
if self.n_components == 3 : break
if self.n_components == 2 and i == 3 : break
bestWeights = np.tile(1.0 / self.n_components, self.n_components)
self.covars_ = bestCovars
self.means_ = bestMeans
self.weights_ = bestWeights
self.converged_ = bestConverged
self.category = bestCategory
return self
####
def training(self, X):
max_log_prob = -np.infty
lowest_bias = np.infty
wmin, wmax = 0.8, 1.2 # Factor interval [wmin, wmax]
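# Each restart scales the canonical initial means by a factor w in [wmin, wmax]
# instead of re-seeding with k-means; the best restart is selected below by the
# lowest bias among converged fits.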
for w in np.linspace(wmin, wmax, self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
#self.means_ = cluster.KMeans(n_clusters=self.n_components, random_state=self.random_state).fit(X).cluster_centers_
self.means_ = w * self.init_means
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_= np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
"""
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape :
cv.shape = (1, 1)
self.covars_ = distribute_covar_matrix_to_match_covariance_type(cv, self.covariance_type, self.n_components)
"""
self.covars_ = self.init_covars
# EM algorithms
log_likelihood = []
# reset self.converged_ to False
self.converged_= False
for i in range(self.n_iter):
# Expectation step
curr_log_likelihood, responsibilities = self.score_samples(X)
log_likelihood.append(curr_log_likelihood.sum())
# Check for convergence.
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2]) < self.thresh:
self.converged_ = True
break
#Maximization step
self._do_mstep(X, responsibilities, self.params, self.min_covar)
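# Bias: how far the fitted component means drifted from their initial values.
# The restart with the lowest bias (preferring converged fits) is kept.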
if self.n_components == 3:
curr_bias =(self.means_[0][0]-self.init_means[0][0])+np.abs(self.means_[1][0]-self.init_means[1][0])+(self.init_means[2][0]-self.means_[2][0])
elif self.n_components == 2:
curr_bias =np.abs(self.means_[0][0] - self.init_means[0][0]) + np.abs(self.init_means[1][0] - self.means_[1][0])
elif self.n_components == 1:
curr_bias =np.abs (self.means_[0][0] - self.init_means[0][0])
else :
print >> sys.stderr, '[ERROR] The component number can only be between [1,3]. But yours is ', self.n_components
sys.exit(1)
self.Label2Genotype()
if w == wmin:
max_log_prob = log_likelihood[-1]
best_params = {'weights':self.weights_,
'means':self.means_,
'covars':self.covars_,
'converged':self.converged_,
'category':self.category}
if self.converged_:
lowest_bias = curr_bias
if self.converged_ and lowest_bias > curr_bias:
max_log_prob = log_likelihood[-1]
lowest_bias = curr_bias
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_,
'converged': self.converged_,
'category':self.category}
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data." )
# if neendshift :
# self.covars_ = tmp_params['covars']
# self.means_ = tmp_params['means']
# self.weights_ = tmp_params['weights']
# self.converged_ = tmp_params['converged']
# self.category = tmp_params['category']
return best_params, lowest_bias
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""
Perform the M-step of the EM algorithm and return the class weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(self, X, responsibilities, weighted_X_sum, inverse_weights,min_covar)
return weights
"""
Here is just for genotyping process
"""
# Decide which Gaussian mu (mean) corresponds to which genotype
def Label2Genotype(self):
label2genotype = {}
if self.converged_:
if len(self.means_) > 3 :
print >> sys.stderr, 'Do not allow more than 3 components. But you set', len(self.means_)
sys.exit(1)
for label,mu in enumerate(self.means_[:,0]):
best_distance, bestIndx = np.infty, 0
for i,m in enumerate(self.init_means[:,0]):
distance = np.abs(mu - m)
if distance < best_distance:
bestIndx = i
best_distance = distance
label2genotype[label] = self.category[bestIndx]
# Set converged_ to False if more than one 'label' points to the same 'genotype'
g2c = {v:k for k,v in label2genotype.items()}
if len(label2genotype) != len(g2c): self.converged_ = False
else :
label2genotype = { label: './.' for label in range( self.n_components ) }
return label2genotype
def Mendel(self, genotype, sample2col, family):
ngIndx = []
m,n,num = 0.0,0.0,0 # m is match; n is not match
for k,v in family.items():
#if v[0] not in sample2col or v[1] not in sample2col : continue
if k not in sample2col or v[0] not in sample2col or v[1] not in sample2col: continue
if k not in sample2col :
print >> sys.stderr, 'The sample name is not in vcf file! ', k
sys.exit(1)
# c1 is son; c2 and c3 are the parents
c1,c2,c3 = genotype[ sample2col[k] ], genotype[ sample2col[v[0]] ], genotype[ sample2col[v[1]] ]
if c1 == './.' or c2 == './.' or c3 == './.': continue
num += 1;
ng = False
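# Enumerate the parental genotype combinations and check whether the child's
# call is Mendelian-consistent; inconsistent trios are flagged via ng and their
# sample columns collected in ngIndx.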
if c2 == c3 :
if c2 == '0/0' or c2 == '1/1' :
if c1 == c2 : m += 1
else :
n += 1
ng = True
else : # c2 == '0/1' and c3 == '0/1'
m += 1
elif c2 == '0/1' and c3 == '1/1' :
if c1 == '0/0' :
n += 1
ng = True
else : m += 1
elif c2 == '0/1' and c3 == '0/0' :
if c1 == '1/1' :
n += 1
ng = True
else : m += 1
elif c2 == '1/1' and c3 == '0/1' :
if c1 == '0/0' :
n += 1
ng = True
else : m += 1
elif c2 == '1/1' and c3 == '0/0' :
if c1 == '1/1' or c1 == '0/0':
n += 1
ng = True
else : m += 1
elif c2 == '0/0' and c3 == '0/1' :
if c1 == '1/1' :
n += 1
ng = True
else : m += 1
elif c2 == '0/0' and c3 == '1/1' :
if c1 == '0/0' or c1 == '1/1' :
n += 1
ng = True
else : m += 1
if ng :
ngIndx.append(sample2col[k])
ngIndx.append(sample2col[v[0]])
ngIndx.append(sample2col[v[1]])
return m,n,num,set(ngIndx)
###
def log_multivariate_normal_density(X, means, covars, covariance_type='full'):
"""
Per-sample log probability of X under each Gaussian; dispatches on covariance_type (only 'full' is implemented here).
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([])
if X.shape[1] != means.shape[1]:
raise ValueError('The shape of X is not compatible with self')
log_multivariate_normal_density_dict = {
'full' : _log_multivariate_normal_density_full
}
return log_multivariate_normal_density_dict[covariance_type]( X, means, covars )
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""
Log probability for full covariance matrices.
"""
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
solve_triangular = linalg.solve
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
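# For each component, use the Cholesky factor of the covariance to obtain
# log|Sigma| (from its diagonal) and the Mahalanobis term (via a triangular
# solve), giving the multivariate normal log-density.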
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations; we need to reinitialize this component
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
def distribute_covar_matrix_to_match_covariance_type( tied_cv, covariance_type, n_components) :
"""
Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm, min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
# Underflow Errors in doing post * X.T are not important
np.seterr(under='ignore')
avg_cv = np.dot(post * X.T, X) / (post.sum() + 10 * EPS)
mu = gmm.means_[c][np.newaxis]
cv[c] = (avg_cv - np.dot(mu.T, mu) + min_covar * np.eye(n_features))
return cv
_covar_mstep_funcs = { 'full': _covar_mstep_full }
| mit |
marcua/qurk_experiments | qurkexp/estimation/models.py | 1 | 3728 | from django.db import models
# The kind of item we are estimating properties of
class Kind(models.Model):
name = models.TextField(unique=True)
def __str__(self):
return self.name
# Items whose properties we are estimating
class Item(models.Model):
kind = models.ForeignKey(Kind, related_name="items")
ident = models.TextField() # a compact unique identifier
data = models.TextField() # app-specific information
class Meta:
unique_together = ("kind", "ident")
def __getitem__(self, prop):
for ann in self.annotations.filter(prop=prop):
return ann.val
return super(Item, self).__getitem__(prop)
def __str__(self):
return "%s: %s (%d)" % (self.kind, self.ident, self.id)
# An item property annotation
class Annotation(models.Model):
item = models.ForeignKey(Item, related_name="annotations")
prop = models.TextField() # property name
val = models.TextField() # property value
class Meta:
unique_together = ("item", "prop")
# An experiment to estimate some property of a collection of items
class EstExp(models.Model):
name = models.TextField(unique=True)
kind = models.ForeignKey(Kind, related_name="exps")
prop = models.TextField() # property we're estimating
# Maps an experiment to all the Items in that experiment.
# Not a ManyToMany relationship so that an item can appear in an
# experiment more than once.
class ExpItem(models.Model):
exp = models.ForeignKey(EstExp, related_name="item_ptrs")
item = models.ForeignKey(Item, related_name="exp_ptrs")
# A run on MTurk of an estimation experiment. You set a batch size
# and a display style so that we can test various modes of estimating
# different samples.
class ExpRun(models.Model):
exp = models.ForeignKey(EstExp, related_name="runs")
name = models.TextField(unique=True)
batch_size = models.IntegerField()
num_batches = models.IntegerField()
display_style = models.TextField()
assignments = models.IntegerField()
price = models.FloatField()
def __str__(self):
retval = "Experiment '%s', run '%s', batch size %d, display style %s"
retval = retval % (self.exp.name, self.name, self.batch_size, self.display_style)
return retval
# The values of the experiment properties to estimate. A single
# worker can estimate multiple values at once. For example, not only
# estimate how many blue squares, but also how many pink ones.
class RunVal(models.Model):
run = models.ForeignKey(ExpRun, related_name="vals")
val = models.TextField()
# A single batch of items that is displayed to a worker at once
class RunBatch(models.Model):
run = models.ForeignKey(ExpRun, related_name="batches")
items_ptrs = models.ManyToManyField(ExpItem, related_name="batches")
# The metadata for the response of a single worker
class ValRespMeta(models.Model):
batch = models.ForeignKey(RunBatch)
aid = models.CharField(max_length=128)
hid = models.CharField(max_length=128)
wid = models.CharField(max_length=128)
accept_time = models.DateTimeField(null=False)
submit_time = models.DateTimeField(null=False)
seconds_spent = models.FloatField(default=-1)
screen_height = models.IntegerField(default=-1)
screen_width = models.IntegerField(default=-1)
# A single worker's estimation of a particular property value fraction
class ValRespAns(models.Model):
vrm = models.ForeignKey(ValRespMeta)
val = models.TextField()
count = models.IntegerField()
# A single worker's label for an item's value
class ValRespValue(models.Model):
vrm = models.ForeignKey(ValRespMeta)
item = models.ForeignKey(ExpItem)
val = models.TextField()
| bsd-3-clause |
synth3tk/the-blue-alliance | tests/test_event_get_timezone_id.py | 4 | 3832 | import unittest2
from google.appengine.ext import testbed
from datafeeds.usfirst_event_details_parser import UsfirstEventDetailsParser
from helpers.event_helper import EventHelper
class TestEventGetTimezoneId(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_urlfetch_stub()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def test_2012ct_no_location(self):
with open('test_data/usfirst_html/usfirst_event_details_2012ct.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
event['location'] = None
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), None)
def test_2012ct_bad_location(self):
with open('test_data/usfirst_html/usfirst_event_details_2012ct.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
event['location'] = "somewhere on mars"
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), None)
def test_2012ct(self):
with open('test_data/usfirst_html/usfirst_event_details_2012ct.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'America/New_York')
def test_2013flbr(self):
with open('test_data/usfirst_html/usfirst_event_details_2013flbr.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'America/New_York')
def test_2013casj(self):
with open('test_data/usfirst_html/usfirst_event_details_2013casj.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'America/Los_Angeles')
def test_2001sj(self):
with open('test_data/usfirst_html/usfirst_event_details_2001ca2.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'America/Los_Angeles')
def test_2005is(self):
with open('test_data/usfirst_html/usfirst_event_details_2005is.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'Asia/Jerusalem')
def test_2005or(self):
with open('test_data/usfirst_html/usfirst_event_details_2005or.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'America/Los_Angeles')
def test_1997il(self):
with open('test_data/usfirst_html/usfirst_event_details_1997il.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'America/Chicago')
def test_2002sj(self):
with open('test_data/usfirst_html/usfirst_event_details_2002sj.html', 'r') as f:
event, _ = UsfirstEventDetailsParser.parse(f.read())
self.assertEqual(EventHelper.get_timezone_id(event['location'], '{}{}'.format(event['year'], event['event_short'])), 'America/Los_Angeles')
| mit |
bohlian/erpnext | erpnext/stock/report/batch_wise_balance_history/batch_wise_balance_history.py | 32 | 3377 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cint, getdate
def execute(filters=None):
if not filters: filters = {}
float_precision = cint(frappe.db.get_default("float_precision")) or 3
columns = get_columns(filters)
item_map = get_item_details(filters)
iwb_map = get_item_warehouse_batch_map(filters, float_precision)
data = []
for item in sorted(iwb_map):
for wh in sorted(iwb_map[item]):
for batch in sorted(iwb_map[item][wh]):
qty_dict = iwb_map[item][wh][batch]
if qty_dict.opening_qty or qty_dict.in_qty or qty_dict.out_qty or qty_dict.bal_qty:
data.append([item, item_map[item]["item_name"], item_map[item]["description"], wh, batch,
flt(qty_dict.opening_qty, float_precision), flt(qty_dict.in_qty, float_precision),
flt(qty_dict.out_qty, float_precision), flt(qty_dict.bal_qty, float_precision),
item_map[item]["stock_uom"]
])
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = [_("Item") + ":Link/Item:100"] + [_("Item Name") + "::150"] + [_("Description") + "::150"] + \
[_("Warehouse") + ":Link/Warehouse:100"] + [_("Batch") + ":Link/Batch:100"] + [_("Opening Qty") + ":Float:90"] + \
[_("In Qty") + ":Float:80"] + [_("Out Qty") + ":Float:80"] + [_("Balance Qty") + ":Float:90"] + \
[_("UOM") + "::90"]
return columns
def get_conditions(filters):
conditions = ""
if not filters.get("from_date"):
frappe.throw(_("'From Date' is required"))
if filters.get("to_date"):
conditions += " and posting_date <= '%s'" % filters["to_date"]
else:
frappe.throw(_("'To Date' is required"))
return conditions
#get all details
def get_stock_ledger_entries(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select item_code, batch_no, warehouse,
posting_date, actual_qty
from `tabStock Ledger Entry`
where docstatus < 2 and ifnull(batch_no, '') != '' %s order by item_code, warehouse""" %
conditions, as_dict=1)
def get_item_warehouse_batch_map(filters, float_precision):
sle = get_stock_ledger_entries(filters)
iwb_map = {}
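# iwb_map[item][warehouse][batch] accumulates opening_qty (movements before
# from_date), in_qty/out_qty (movements inside the period) and bal_qty (the
# running balance over all movements).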
from_date = getdate(filters["from_date"])
to_date = getdate(filters["to_date"])
for d in sle:
iwb_map.setdefault(d.item_code, {}).setdefault(d.warehouse, {})\
.setdefault(d.batch_no, frappe._dict({
"opening_qty": 0.0, "in_qty": 0.0, "out_qty": 0.0, "bal_qty": 0.0
}))
qty_dict = iwb_map[d.item_code][d.warehouse][d.batch_no]
if d.posting_date < from_date:
qty_dict.opening_qty = flt(qty_dict.opening_qty, float_precision) \
+ flt(d.actual_qty, float_precision)
elif d.posting_date >= from_date and d.posting_date <= to_date:
if flt(d.actual_qty) > 0:
qty_dict.in_qty = flt(qty_dict.in_qty, float_precision) + flt(d.actual_qty, float_precision)
else:
qty_dict.out_qty = flt(qty_dict.out_qty, float_precision) \
+ abs(flt(d.actual_qty, float_precision))
qty_dict.bal_qty = flt(qty_dict.bal_qty, float_precision) + flt(d.actual_qty, float_precision)
return iwb_map
def get_item_details(filters):
item_map = {}
for d in frappe.db.sql("select name, item_name, description, stock_uom from tabItem", as_dict=1):
item_map.setdefault(d.name, d)
return item_map
| gpl-3.0 |
yousafsyed/casperjs | bin/Lib/idlelib/ColorDelegator.py | 12 | 10467 | import time
import re
import keyword
import builtins
from tkinter import *
from idlelib.Delegator import Delegator
from idlelib.configHandler import idleConf
DEBUG = False
def any(name, alternates):
"Return a named group pattern matching list of alternates."
return "(?P<%s>" % name + "|".join(alternates) + ")"
def make_pat():
kw = r"\b" + any("KEYWORD", keyword.kwlist) + r"\b"
builtinlist = [str(name) for name in dir(builtins)
if not name.startswith('_') and \
name not in keyword.kwlist]
# self.file = open("file") :
# 1st 'file' colorized normal, 2nd as builtin, 3rd as string
builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b"
comment = any("COMMENT", [r"#[^\n]*"])
stringprefix = r"(\br|u|ur|R|U|UR|Ur|uR|b|B|br|Br|bR|BR|rb|rB|Rb|RB)?"
sqstring = stringprefix + r"'[^'\\\n]*(\\.[^'\\\n]*)*'?"
dqstring = stringprefix + r'"[^"\\\n]*(\\.[^"\\\n]*)*"?'
sq3string = stringprefix + r"'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
dq3string = stringprefix + r'"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
string = any("STRING", [sq3string, dq3string, sqstring, dqstring])
return kw + "|" + builtin + "|" + comment + "|" + string +\
"|" + any("SYNC", [r"\n"])
prog = re.compile(make_pat(), re.S)
idprog = re.compile(r"\s+(\w+)", re.S)
asprog = re.compile(r".*?\b(as)\b")
class ColorDelegator(Delegator):
def __init__(self):
Delegator.__init__(self)
self.prog = prog
self.idprog = idprog
self.asprog = asprog
self.LoadTagDefs()
def setdelegate(self, delegate):
if self.delegate is not None:
self.unbind("<<toggle-auto-coloring>>")
Delegator.setdelegate(self, delegate)
if delegate is not None:
self.config_colors()
self.bind("<<toggle-auto-coloring>>", self.toggle_colorize_event)
self.notify_range("1.0", "end")
else:
# No delegate - stop any colorizing
self.stop_colorizing = True
self.allow_colorizing = False
def config_colors(self):
for tag, cnf in self.tagdefs.items():
if cnf:
self.tag_configure(tag, **cnf)
self.tag_raise('sel')
def LoadTagDefs(self):
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs = {
"COMMENT": idleConf.GetHighlight(theme, "comment"),
"KEYWORD": idleConf.GetHighlight(theme, "keyword"),
"BUILTIN": idleConf.GetHighlight(theme, "builtin"),
"STRING": idleConf.GetHighlight(theme, "string"),
"DEFINITION": idleConf.GetHighlight(theme, "definition"),
"SYNC": {'background':None,'foreground':None},
"TODO": {'background':None,'foreground':None},
"BREAK": idleConf.GetHighlight(theme, "break"),
"ERROR": idleConf.GetHighlight(theme, "error"),
# The following is used by ReplaceDialog:
"hit": idleConf.GetHighlight(theme, "hit"),
}
if DEBUG: print('tagdefs',self.tagdefs)
def insert(self, index, chars, tags=None):
index = self.index(index)
self.delegate.insert(index, chars, tags)
self.notify_range(index, index + "+%dc" % len(chars))
def delete(self, index1, index2=None):
index1 = self.index(index1)
self.delegate.delete(index1, index2)
self.notify_range(index1)
after_id = None
allow_colorizing = True
colorizing = False
def notify_range(self, index1, index2=None):
self.tag_add("TODO", index1, index2)
if self.after_id:
if DEBUG: print("colorizing already scheduled")
return
if self.colorizing:
self.stop_colorizing = True
if DEBUG: print("stop colorizing")
if self.allow_colorizing:
if DEBUG: print("schedule colorizing")
self.after_id = self.after(1, self.recolorize)
close_when_done = None # Window to be closed when done colorizing
def close(self, close_when_done=None):
if self.after_id:
after_id = self.after_id
self.after_id = None
if DEBUG: print("cancel scheduled recolorizer")
self.after_cancel(after_id)
self.allow_colorizing = False
self.stop_colorizing = True
if close_when_done:
if not self.colorizing:
close_when_done.destroy()
else:
self.close_when_done = close_when_done
def toggle_colorize_event(self, event):
if self.after_id:
after_id = self.after_id
self.after_id = None
if DEBUG: print("cancel scheduled recolorizer")
self.after_cancel(after_id)
if self.allow_colorizing and self.colorizing:
if DEBUG: print("stop colorizing")
self.stop_colorizing = True
self.allow_colorizing = not self.allow_colorizing
if self.allow_colorizing and not self.colorizing:
self.after_id = self.after(1, self.recolorize)
if DEBUG:
print("auto colorizing turned",\
self.allow_colorizing and "on" or "off")
return "break"
def recolorize(self):
self.after_id = None
if not self.delegate:
if DEBUG: print("no delegate")
return
if not self.allow_colorizing:
if DEBUG: print("auto colorizing is off")
return
if self.colorizing:
if DEBUG: print("already colorizing")
return
try:
self.stop_colorizing = False
self.colorizing = True
if DEBUG: print("colorizing...")
t0 = time.perf_counter()
self.recolorize_main()
t1 = time.perf_counter()
if DEBUG: print("%.3f seconds" % (t1-t0))
finally:
self.colorizing = False
if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"):
if DEBUG: print("reschedule colorizing")
self.after_id = self.after(1, self.recolorize)
if self.close_when_done:
top = self.close_when_done
self.close_when_done = None
top.destroy()
def recolorize_main(self):
next = "1.0"
while True:
item = self.tag_nextrange("TODO", next)
if not item:
break
head, tail = item
self.tag_remove("SYNC", head, tail)
item = self.tag_prevrange("SYNC", head)
if item:
head = item[1]
else:
head = "1.0"
chars = ""
next = head
lines_to_get = 1
ok = False
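# Grow the chunk of text exponentially (up to 100 lines at a time) until its
# end is already tagged SYNC, i.e. a point known to be in a consistent state.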
while not ok:
mark = next
next = self.index(mark + "+%d lines linestart" %
lines_to_get)
lines_to_get = min(lines_to_get * 2, 100)
ok = "SYNC" in self.tag_names(next + "-1c")
line = self.get(mark, next)
##print head, "get", mark, next, "->", repr(line)
if not line:
return
for tag in self.tagdefs:
self.tag_remove(tag, mark, next)
chars = chars + line
m = self.prog.search(chars)
while m:
for key, value in m.groupdict().items():
if value:
a, b = m.span(key)
self.tag_add(key,
head + "+%dc" % a,
head + "+%dc" % b)
if value in ("def", "class"):
m1 = self.idprog.match(chars, b)
if m1:
a, b = m1.span(1)
self.tag_add("DEFINITION",
head + "+%dc" % a,
head + "+%dc" % b)
elif value == "import":
# color all the "as" words on same line, except
# if in a comment; cheap approximation to the
# truth
if '#' in chars:
endpos = chars.index('#')
else:
endpos = len(chars)
while True:
m1 = self.asprog.match(chars, b, endpos)
if not m1:
break
a, b = m1.span(1)
self.tag_add("KEYWORD",
head + "+%dc" % a,
head + "+%dc" % b)
m = self.prog.search(chars, m.end())
if "SYNC" in self.tag_names(next + "-1c"):
head = next
chars = ""
else:
ok = False
if not ok:
# We're in an inconsistent state, and the call to
# update may tell us to stop. It may also change
# the correct value for "next" (since this is a
# line.col string, not a true mark). So leave a
# crumb telling the next invocation to resume here
# in case update tells us to leave.
self.tag_add("TODO", next)
self.update()
if self.stop_colorizing:
if DEBUG: print("colorizing stopped")
return
def removecolors(self):
for tag in self.tagdefs:
self.tag_remove(tag, "1.0", "end")
def main():
from idlelib.Percolator import Percolator
root = Tk()
root.wm_protocol("WM_DELETE_WINDOW", root.quit)
text = Text(background="white")
text.pack(expand=1, fill="both")
text.focus_set()
p = Percolator(text)
d = ColorDelegator()
p.insertfilter(d)
root.mainloop()
if __name__ == "__main__":
main()
| mit |
latinproject/booktype-latin | lib/booki/editor/management/commands/reportweekly.py | 1 | 9125 | # This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import datetime
from django.conf import settings
now = datetime.datetime.now()
week_ago = None
import os.path
import re
def getUsers():
"""Get users who registered today."""
from django.contrib.auth.models import User
return User.objects.filter(date_joined__gte = week_ago)
return None
def getBooks():
"""Get books created today."""
from booki.editor.models import Book
return Book.objects.filter(created__gte = week_ago)
def getGroups():
"""Get groups created today."""
from booki.editor.models import BookiGroup
return BookiGroup.objects.filter(created__gte = week_ago)
def getHistory():
from booki.editor.models import BookHistory
from django.db.models import Count
from booki.editor.models import Book
history = []
for books2 in BookHistory.objects.filter(modified__gte = week_ago).values('book').annotate(Count('book')).order_by("-book__count"):
book = Book.objects.get(pk=books2['book'])
history.append((book, [h.chapter_history for h in BookHistory.objects.filter(book__id=books2['book'],
chapter_history__isnull=False,
modified__gte = week_ago)]))
return history
def getInfo():
from django.contrib.auth.models import User
from booki.editor.models import Book, Attachment, BookiGroup
from django.db import connection
numOfUsers = len(User.objects.all())
numOfBooks = len(Book.objects.all())
numOfGroups = len(BookiGroup.objects.all())
attachmentsSize = 0
for at in Attachment.objects.all():
try:
attachmentsSize += at.attachment.size
except:
pass
cursor = connection.cursor()
cursor.execute("SELECT pg_database_size(%s)", [settings.DATABASES['default']['NAME']]);
databaseSize = cursor.fetchone()[0]
return {'users_num': numOfUsers,
'books_num': numOfBooks,
'groups_num': numOfGroups,
'attachments_size': attachmentsSize,
'database_size': databaseSize}
def getChart():
from django.db.models import Count
from booki.editor import models
import math
hours = []
max_num = 0
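# hours[day][bucket] counts edits per 4-hour bucket for each of the 7 days;
# the counts are later rescaled to a 0-200 px bar height for the e-mail graph.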
for days in range(7):
that_day = week_ago + datetime.timedelta(days=days)
hours.append([0]*7)
for x in models.BookHistory.objects.filter(modified__year = that_day.year, modified__month = that_day.month, modified__day = that_day.day).extra({'modified': 'EXTRACT (HOUR FROM modified)'}).values('modified').annotate(count=Count('id')):
cnt = x['count']
hours[days][int(x['modified']/4)] += cnt
if cnt > max_num: max_num = cnt
for y in range(7):
for x in range(6):
try:
hours[y][x] = int(float(hours[y][x])/float(max_num)*200.0)
except ZeroDivisionError:
hours[y][x] = 0
return hours
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--send-email',
action='store_true',
dest='send_email',
default=False,
help='Send email to admin'),
make_option('--days',
action='store',
dest='days',
default=0,
help='N days ago')
)
def handle(self, *args, **options):
global now
global week_ago
now = now - datetime.timedelta(days=int(options['days']))
week_ago = now - datetime.timedelta(days=7)
users = getUsers()
books = getBooks()
groups = getGroups()
history = getHistory()
info = getInfo()
chart = getChart()
try:
BOOKTYPE_NAME = settings.BOOKI_NAME
except AttributeError:
BOOKTYPE_NAME = 'Booktype'
from booki.editor import models
from django.db.models import Count
active_books = [models.Book.objects.get(id=b['book']) for b in models.BookHistory.objects.filter(modified__gte = week_ago).values('book').annotate(Count('book')).order_by("-book__count")[:10]]
# render result
from django.contrib.auth.models import User
active_users = [User.objects.get(id=b['user']) for b in models.BookHistory.objects.filter(modified__gte = week_ago).values('user').annotate(Count('user')).order_by("-user__count")[:10]]
from django import template
t = template.loader.get_template('booktype_weekly_report.html')
con = t.render(template.Context({"users": users,
"books": books,
"groups": groups,
"history": history,
"report_date": now,
"info": info,
"booki_name": BOOKTYPE_NAME,
"week_ago": week_ago,
"now_date": now,
"active_books": active_books,
"active_users": active_users,
"site_url": settings.BOOKI_URL
}))
if options['send_email']:
from django.core.mail import EmailMultiAlternatives
try:
REPORT_EMAIL_USER = settings.REPORT_EMAIL_USER
except AttributeError:
REPORT_EMAIL_USER = '[email protected]'
emails = [em[1] for em in settings.ADMINS]
subject = 'Weekly report for %s (%s)' % (BOOKTYPE_NAME, now.strftime("%A %d %B %Y"))
text_content = con
html_content = con
msg = EmailMultiAlternatives(subject, text_content, REPORT_EMAIL_USER, emails)
msg.attach_alternative(html_content, "text/html")
# Make graph
import ImageFont, ImageDraw, Image
from booki.editor import models as ed
import os.path
font = ImageFont.truetype("%s/management/commands/linear-by-braydon-fuller.otf" % os.path.dirname(ed.__file__), 12)
text_size = font.getsize("P")
image = Image.new("RGB", (40+(7*6*10), 200+2+12), (255, 255, 255))
draw = ImageDraw.Draw(image)
bottom_padding = text_size[1]+12
for y in range(7):
for x in range(6):
value = chart[y][x]
if value > 0:
draw.rectangle((20+y*60+x*10, image.size[1]-bottom_padding-2-value, 20+y*60+(1+x)*10, image.size[1]-bottom_padding-2), fill=(95, 158, 237))
draw.line((0, image.size[1]-bottom_padding) + (image.size[0], image.size[1]-bottom_padding), fill = (128, 128, 128))
draw.line((0, image.size[1]-bottom_padding-1) + (image.size[0], image.size[1]-bottom_padding-1), fill = (128, 128, 128))
for x in range(8):
draw.ellipse((20+x*60-3, image.size[1]-bottom_padding-3, 20+x*60+3, image.size[1]-bottom_padding+3), fill = (128, 128,128))
draw.rectangle((20+x*60-2+30, image.size[1]-bottom_padding-2, 20+x*60+30+2, image.size[1]-bottom_padding+2), fill = (128, 128,128))
def _width(s):
return font.getsize(s)[0]
def _day(n):
s = (week_ago+datetime.timedelta(days=n)).strftime('%d.%m')
draw.text((20+n*60-_width(s)/2, image.size[1]-bottom_padding+6), s, font=font, fill=(0,0,0))
for d in range(8):
_day(d)
import StringIO
output = StringIO.StringIO()
image.save(output, 'PNG')
data = output.getvalue()
from email.MIMEImage import MIMEImage
msgImage = MIMEImage(data)
msgImage.add_header('Content-ID', '<graph.png>')
msg.attach(msgImage)
msg.send()
else:
pass
#print con
| agpl-3.0 |
bluemini/kuma | vendor/packages/translate/storage/test_ts2.py | 24 | 8528 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Tests for Qt Linguist storage class
Reference implementation & tests:
gitorious:qt5-tools/src/qttools/tests/auto/linguist/lconvert/data
"""
from lxml import etree
from translate.misc.multistring import multistring
from translate.storage import test_base, ts2 as ts
from translate.storage.placeables import parse, xliff
from translate.storage.placeables.lisa import xml_to_strelem
TS_NUMERUS = """<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE TS>
<TS version="2.1">
<context>
<name>Dialog2</name>
<message numerus="yes">
<source>%n files</source>
<translation type="unfinished">
<numerusform></numerusform>
</translation>
</message>
<message id="this_is_some_id" numerus="yes">
<source>%n cars</source>
<translation type="unfinished">
<numerusform></numerusform>
</translation>
</message>
<message>
<source>Age: %1</source>
<translation type="unfinished"></translation>
</message>
<message id="this_is_another_id">
<source>func3</source>
<translation type="unfinished"></translation>
</message>
</context>
</TS>
"""
xliffparsers = []
for attrname in dir(xliff):
attr = getattr(xliff, attrname)
if type(attr) is type and \
attrname not in ('XLIFFPlaceable') and \
hasattr(attr, 'parse') and \
attr.parse is not None:
xliffparsers.append(attr.parse)
def rich_parse(s):
return parse(s, xliffparsers)
class TestTSUnit(test_base.TestTranslationUnit):
UnitClass = ts.tsunit
class TestTSfile(test_base.TestTranslationStore):
StoreClass = ts.tsfile
def test_basic(self):
tsfile = ts.tsfile()
assert tsfile.units == []
tsfile.addsourceunit("Bla")
assert len(tsfile.units) == 1
newfile = ts.tsfile.parsestring(str(tsfile))
print(str(tsfile))
assert len(newfile.units) == 1
assert newfile.units[0].source == "Bla"
assert newfile.findunit("Bla").source == "Bla"
assert newfile.findunit("dit") is None
def test_source(self):
tsfile = ts.tsfile()
tsunit = tsfile.addsourceunit("Concept")
tsunit.source = "Term"
newfile = ts.tsfile.parsestring(str(tsfile))
print(str(tsfile))
assert newfile.findunit("Concept") is None
assert newfile.findunit("Term") is not None
def test_target(self):
tsfile = ts.tsfile()
tsunit = tsfile.addsourceunit("Concept")
tsunit.target = "Konsep"
newfile = ts.tsfile.parsestring(str(tsfile))
print(str(tsfile))
assert newfile.findunit("Concept").target == "Konsep"
def test_plurals(self):
"""Test basic plurals"""
tsfile = ts.tsfile()
tsunit = tsfile.addsourceunit("File(s)")
tsunit.target = [u"Leêr", u"Leêrs"]
newfile = ts.tsfile.parsestring(str(tsfile))
print(str(tsfile))
checkunit = newfile.findunit("File(s)")
assert checkunit.target == [u"Leêr", u"Leêrs"]
assert checkunit.hasplural()
def test_language(self):
"""Check that we can get and set language and sourcelanguage
in the header"""
tsstr = '''<!DOCTYPE TS>
<TS version="2.0" language="fr" sourcelanguage="de">
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr)
assert tsfile.gettargetlanguage() == 'fr'
assert tsfile.getsourcelanguage() == 'de'
tsfile.settargetlanguage('pt_BR')
assert 'pt_BR' in str(tsfile)
assert tsfile.gettargetlanguage() == 'pt-br'
# We convert en_US to en
tsstr = '''<!DOCTYPE TS>
<TS version="2.0" language="fr" sourcelanguage="en_US">
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr)
assert tsfile.getsourcelanguage() == 'en'
def test_edit(self):
"""test editing works well"""
tsstr = '''<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
<name>MainWindow</name>
<message>
<source>ObsoleteString</source>
<translation type="obsolete">Groepen</translation>
</message>
<message>
<source>SourceString</source>
<translation>TargetString</translation>
</message>
</context>
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr)
tsfile.units[1].settarget('TestTarget')
tsfile.units[1].markfuzzy(True)
newtsstr = tsstr.decode('utf-8').replace(
'>TargetString', ' type="unfinished">TestTarget'
).encode('utf-8')
assert newtsstr == str(tsfile)
def test_locations(self):
"""test that locations work well"""
tsstr = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
<name>MainWindow</name>
<message>
<location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
<source>Desktop Settings (Default)</source>
<translation>Asztali beállítások (Alapértelmezett)</translation>
</message>
<message>
<location line="+5"/>
<source>Choose style and palette based on your desktop settings.</source>
<translation>Stílus és paletta alapú kiválasztása az asztali beállításokban.</translation>
</message>
</context>
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr)
assert len(tsfile.units) == 2
assert tsfile.units[0].getlocations() == ['../tools/qtconfig/mainwindow.cpp:+202']
assert tsfile.units[1].getlocations() == ['+5']
def test_merge_with_fuzzies(self):
"""test that merge with fuzzy works well"""
tsstr1 = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
<name>MainWindow</name>
<message>
<location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
<source>Desktop Settings (Default)</source>
<translation type="unfinished">Asztali beállítások (Alapértelmezett)</translation>
</message>
<message>
<location line="+5"/>
<source>Choose style and palette based on your desktop settings.</source>
<translation>Stílus és paletta alapú kiválasztása az asztali beállításokban.</translation>
</message>
</context>
</TS>
'''
tsstr2 = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE TS>
<TS version="2.0" language="hu">
<context>
<name>MainWindow</name>
<message>
<location filename="../tools/qtconfig/mainwindow.cpp" line="+202"/>
<source>Desktop Settings (Default)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Choose style and palette based on your desktop settings.</source>
<translation type="unfinished"/>
</message>
</context>
</TS>
'''
tsfile = ts.tsfile.parsestring(tsstr1)
tsfile2 = ts.tsfile.parsestring(tsstr2)
assert len(tsfile.units) == 2
assert len(tsfile2.units) == 2
tsfile2.units[0].merge(tsfile.units[0]) # fuzzy
tsfile2.units[1].merge(tsfile.units[1]) # not fuzzy
assert tsfile2.units[0].isfuzzy()
assert not tsfile2.units[1].isfuzzy()
def test_getid(self):
"""test that getid works well"""
tsfile = ts.tsfile.parsestring(TS_NUMERUS)
assert tsfile.units[0].getid() == "Dialog2%n files"
assert tsfile.units[1].getid() == "Dialog2\nthis_is_some_id%n cars"
assert tsfile.units[3].getid() == "Dialog2\nthis_is_another_idfunc3"
def test_backnforth(self):
"""test that ts files are read and output properly"""
tsfile = ts.tsfile.parsestring(TS_NUMERUS)
assert str(tsfile) == TS_NUMERUS
| mpl-2.0 |
mgogoulos/libcloud | libcloud/common/gogrid.py | 28 | 6395 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import time
from libcloud.utils.py3 import b
from libcloud.common.types import InvalidCredsError, LibcloudError
from libcloud.common.types import MalformedResponseError
from libcloud.common.base import ConnectionUserAndKey, JsonResponse
from libcloud.compute.base import NodeLocation
HOST = 'api.gogrid.com'
PORTS_BY_SECURITY = {True: 443, False: 80}
API_VERSION = '1.8'
__all__ = [
"GoGridResponse",
"GoGridConnection",
"GoGridIpAddress",
"BaseGoGridDriver",
]
class GoGridResponse(JsonResponse):
def __init__(self, *args, **kwargs):
self.driver = BaseGoGridDriver
super(GoGridResponse, self).__init__(*args, **kwargs)
def success(self):
if self.status == 403:
raise InvalidCredsError('Invalid credentials', self.driver)
if self.status == 401:
raise InvalidCredsError('API Key has insufficient rights',
self.driver)
if not self.body:
return None
try:
return self.parse_body()['status'] == 'success'
except ValueError:
raise MalformedResponseError('Malformed reply',
body=self.body,
driver=self.driver)
def parse_error(self):
try:
return self.parse_body()["list"][0]["message"]
except (ValueError, KeyError):
return None
class GoGridConnection(ConnectionUserAndKey):
"""
Connection class for the GoGrid driver
"""
host = HOST
responseCls = GoGridResponse
def add_default_params(self, params):
params["api_key"] = self.user_id
params["v"] = API_VERSION
params["format"] = 'json'
params["sig"] = self.get_signature(self.user_id, self.key)
return params
def get_signature(self, key, secret):
""" create sig from md5 of key + secret + time """
m = hashlib.md5(b(key + secret + str(int(time.time()))))
return m.hexdigest()
def request(self, action, params=None, data='', headers=None, method='GET',
raw=False):
return super(GoGridConnection, self).request(action, params, data,
headers, method, raw)
class GoGridIpAddress(object):
"""
IP Address
"""
def __init__(self, id, ip, public, state, subnet):
self.id = id
self.ip = ip
self.public = public
self.state = state
self.subnet = subnet
class BaseGoGridDriver(object):
"""GoGrid has common object model for services they
provide, like locations and IP, so keep handling of
these things in a single place."""
name = "GoGrid"
def _get_ip(self, element):
return element.get('ip').get('ip')
def _to_ip(self, element):
ip = GoGridIpAddress(id=element['id'],
ip=element['ip'],
public=element['public'],
subnet=element['subnet'],
state=element["state"]["name"])
ip.location = self._to_location(element['datacenter'])
return ip
def _to_ips(self, object):
return [self._to_ip(el)
for el in object['list']]
def _to_location(self, element):
# pylint: disable=no-member
location = NodeLocation(id=element['id'],
name=element['name'],
country="US",
driver=self.connection.driver)
return location
def _to_locations(self, object):
return [self._to_location(el)
for el in object['list']]
def ex_list_ips(self, **kwargs):
"""Return list of IP addresses assigned to
the account.
:keyword public: set to True to list only
public IPs or False to list only
private IPs. Set to None or leave it
unspecified to avoid filtering by type
:type public: ``bool``
:keyword assigned: set to True to list only addresses
assigned to servers, False to list unassigned
addresses. Set to None or leave it
unspecified to avoid filtering by assignment state
:type assigned: ``bool``
:keyword location: filter IP addresses by location
:type location: :class:`NodeLocation`
:rtype: ``list`` of :class:`GoGridIpAddress`
"""
params = {}
if "public" in kwargs and kwargs["public"] is not None:
params["ip.type"] = {True: "Public",
False: "Private"}[kwargs["public"]]
if "assigned" in kwargs and kwargs["assigned"] is not None:
params["ip.state"] = {True: "Assigned",
False: "Unassigned"}[kwargs["assigned"]]
if "location" in kwargs and kwargs['location'] is not None:
params['datacenter'] = kwargs['location'].id
# pylint: disable=no-member
response = self.connection.request('/api/grid/ip/list', params=params)
ips = self._to_ips(response.object)
return ips
def _get_first_ip(self, location=None):
ips = self.ex_list_ips(public=True, assigned=False, location=location)
try:
return ips[0].ip
except IndexError:
# pylint: disable=no-member
raise LibcloudError('No public unassigned IPs left',
self.driver)
| apache-2.0 |
gdsfactory/gdsfactory | pp/components/spiral_inner_io.py | 1 | 10100 | """ bends with grating couplers inside the spiral
maybe: need to add grating coupler loopback as well
"""
from typing import Optional, Tuple
import numpy as np
import pp
from pp.component import Component
from pp.components.bend_circular import bend_circular, bend_circular180
from pp.components.bend_euler import bend_euler, bend_euler180
from pp.components.straight import straight
from pp.cross_section import get_waveguide_settings
from pp.routing.manhattan import round_corners
from pp.snap import snap_to_grid
from pp.types import ComponentFactory, Number
def get_bend_port_distances(bend: Component) -> Tuple[float, float]:
p0, p1 = bend.ports.values()
return abs(p0.x - p1.x), abs(p0.y - p1.y)
@pp.cell_with_validator
def spiral_inner_io(
N: int = 6,
x_straight_inner_right: float = 150.0,
x_straight_inner_left: float = 150.0,
y_straight_inner_top: float = 50.0,
y_straight_inner_bottom: float = 10.0,
grating_spacing: float = 127.0,
dx: float = 3.0,
dy: float = 3.0,
bend90_function: ComponentFactory = bend_circular,
bend180_function: ComponentFactory = bend_circular180,
width: float = 0.5,
width_grating_coupler: float = 0.5,
straight_factory: ComponentFactory = straight,
taper: Optional[ComponentFactory] = None,
length: Optional[float] = None,
waveguide: str = "strip",
**kwargs
) -> Component:
"""Spiral with ports inside the spiral circle.
Args:
N: number of loops
x_straight_inner_right: length of the inner right straight section
x_straight_inner_left: length of the inner left straight section
y_straight_inner_top: length of the inner top straight section
y_straight_inner_bottom: length of the inner bottom straight section
grating_spacing: spacing between the two inner ports
dx: center to center x-spacing
dy: center to center y-spacing
bend90_function: factory for the 90 degree bends
bend180_function: factory for the 180 degree bend
straight_factory: straight function
taper: taper function
length: target spiral length; if set, x_straight_inner_left is adjusted to match it
"""
waveguide_settings = get_waveguide_settings(waveguide, **kwargs)
width = waveguide_settings.get("width")
taper_length = waveguide_settings.get("taper_length", 10.0)
if length:
if bend180_function == bend_circular180:
x_straight_inner_left = get_straight_length(
length=length,
spiral_function=spiral_inner_io,
N=N,
x_straight_inner_right=x_straight_inner_right,
x_straight_inner_left=x_straight_inner_left,
y_straight_inner_top=y_straight_inner_top,
y_straight_inner_bottom=y_straight_inner_bottom,
grating_spacing=grating_spacing,
dx=dx,
dy=dy,
straight_factory=straight,
bend90_function=bend_euler,
bend180_function=bend_euler180,
)
else:
x_straight_inner_left = get_straight_length(
length=length,
spiral_function=spiral_inner_io_euler,
N=N,
x_straight_inner_right=x_straight_inner_right,
x_straight_inner_left=x_straight_inner_left,
y_straight_inner_top=y_straight_inner_top,
y_straight_inner_bottom=y_straight_inner_bottom,
grating_spacing=grating_spacing,
dx=dx,
dy=dy,
)
_bend180 = pp.call_if_func(bend180_function, **waveguide_settings)
_bend90 = pp.call_if_func(bend90_function, **waveguide_settings)
rx, ry = get_bend_port_distances(_bend90)
_, rx180 = get_bend_port_distances(_bend180) # rx180, second arg since we rotate
component = pp.Component()
# gc_port_lbl = "W0"
# gc1 = _gc.ref(port_id=gc_port_lbl, position=(0, 0), rotation=-90)
# gc2 = _gc.ref(port_id=gc_port_lbl, position=(grating_spacing, 0), rotation=-90)
# component.add([gc1, gc2])
p1 = pp.Port(
name="S0",
midpoint=(0, y_straight_inner_top),
orientation=270,
width=width,
layer=pp.LAYER.WG,
)
p2 = pp.Port(
name="S1",
midpoint=(grating_spacing, y_straight_inner_top),
orientation=270,
width=width,
layer=pp.LAYER.WG,
)
taper = pp.components.taper(
width1=width_grating_coupler,
width2=_bend180.ports["W0"].width,
length=taper_length + y_straight_inner_top - 15 - 35,
)
taper_ref1 = component.add_ref(taper)
taper_ref1.connect("2", p1)
taper_ref2 = component.add_ref(taper)
taper_ref2.connect("2", p2)
component.absorb(taper_ref1)
component.absorb(taper_ref2)
component.add_port(name="S0", port=taper_ref1.ports["1"])
component.add_port(name="S1", port=taper_ref2.ports["1"])
# Create manhattan path going from west grating to westest port of bend 180
_pt = np.array(p1.position)
pts_w = [_pt]
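# Build the Manhattan waypoint list for the west arm: each loop steps the
# rectangle outwards by multiples of dx/dy so successive turns nest around the
# previous ones, and the last loop leaves extra room for the 180 degree bend.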
for i in range(N):
y1 = y_straight_inner_top + ry + (2 * i + 1) * dy
x2 = grating_spacing + 2 * rx + x_straight_inner_right + (2 * i + 1) * dx
y3 = -y_straight_inner_bottom - ry - (2 * i + 3) * dy
x4 = -x_straight_inner_left - (2 * i + 1) * dx
if i == N - 1:
x4 = x4 - rx180 + dx
_pt1 = np.array([_pt[0], y1])
_pt2 = np.array([x2, _pt1[1]])
_pt3 = np.array([_pt2[0], y3])
_pt4 = np.array([x4, _pt3[1]])
_pt5 = np.array([_pt4[0], 0])
_pt = _pt5
pts_w += [_pt1, _pt2, _pt3, _pt4, _pt5]
route_west = round_corners(
pts_w, bend_factory=_bend90, straight_factory=straight_factory, taper=taper
)
component.add(route_west.references)
# Add loop back
bend180_ref = _bend180.ref(port_id="W1", position=route_west.ports[1], rotation=90)
component.add(bend180_ref)
component.absorb(bend180_ref)
# Create manhattan path going from east grating to eastest port of bend 180
_pt = np.array(p2.position)
pts_e = [_pt]
for i in range(N):
y1 = y_straight_inner_top + ry + (2 * i) * dy
x2 = grating_spacing + 2 * rx + x_straight_inner_right + 2 * i * dx
y3 = -y_straight_inner_bottom - ry - (2 * i + 2) * dy
x4 = -x_straight_inner_left - (2 * i) * dx
_pt1 = np.array([_pt[0], y1])
_pt2 = np.array([x2, _pt1[1]])
_pt3 = np.array([_pt2[0], y3])
_pt4 = np.array([x4, _pt3[1]])
_pt5 = np.array([_pt4[0], 0])
_pt = _pt5
pts_e += [_pt1, _pt2, _pt3, _pt4, _pt5]
route_east = round_corners(
pts_e, bend_factory=_bend90, straight_factory=straight_factory, taper=taper
)
component.add(route_east.references)
length = route_east.length + route_west.length + _bend180.length
component.length = snap_to_grid(length + 2 * y_straight_inner_top)
return component
@pp.cell_with_validator
def spiral_inner_io_euler(
bend90_function: ComponentFactory = bend_euler,
bend180_function: ComponentFactory = bend_euler180,
**kwargs
) -> Component:
"""Spiral with euler bends."""
return spiral_inner_io(
bend90_function=bend90_function, bend180_function=bend180_function, **kwargs
)
@pp.cell_with_validator
def spirals_nested(bend_radius: Number = 100) -> Component:
component = pp.Component()
c = spiral_inner_io(
N=42,
y_straight_inner_top=10.0,
y_straight_inner_bottom=5700.0,
x_straight_inner_right=2000.0,
x_straight_inner_left=20.0,
bend_radius=bend_radius,
)
c1 = spiral_inner_io(
N=42,
y_straight_inner_top=10.0,
y_straight_inner_bottom=10.0,
x_straight_inner_right=0.0,
x_straight_inner_left=120.0,
bend_radius=bend_radius,
)
c2 = spiral_inner_io(
N=42,
y_straight_inner_top=10.0,
y_straight_inner_bottom=2000.0,
x_straight_inner_right=0.0,
x_straight_inner_left=120.0,
bend_radius=bend_radius,
)
# for _c in [c, c1, c2]:
# print(_c.info["length"])
component.add(c.ref(position=(0, 0)))
component.add(c1.ref(position=(1150, -850)))
component.add(c2.ref(position=(1150, -2850)))
return component
def get_straight_length(
length: float, spiral_function: ComponentFactory, **kwargs
) -> Number:
"""Returns y_spiral to achieve a particular spiral length"""
x0 = 50
x1 = 400
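# Build two spirals with different x_straight_inner_left values, fit the total
# length as a linear function of that parameter, and invert the fit to hit the
# requested length.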
kwargs.update({"x_straight_inner_left": x0})
s0 = spiral_function(**kwargs)
kwargs.update({"x_straight_inner_left": x1})
s1 = spiral_function(**kwargs)
p = np.polyfit(np.array([x0, x1]), np.array([s0.length, s1.length]), deg=1)
return (length - p[1]) / p[0]
# @cell
# def spiral_inner_io_with_gratings(
# spiral=spiral_inner_io, grating_coupler=pp.components.grating_coupler_elliptical_te, **kwargs
# ):
# spiral = pp.call_if_func(spiral, **kwargs)
# grating_coupler = pp.call_if_func(grating_coupler)
# return add_gratings_and_loop_back(spiral, grating_coupler=grating_coupler)
if __name__ == "__main__":
# c = spiral_inner_io(x_straight_inner_left=800)
c = spiral_inner_io_euler(length=20e3)
# c = spiral_inner_io_euler(length=20e3)
# c = spiral_inner_io_euler(length_spiral=20e3, width=0.4)
# c = spiral_inner_io_euler(length_spiral=60e3, width=0.4)
# print(c.name)
# print(c.settings)
# c = add_gratings_and_loop_back(c)
# c = spirals_nested()
c.show(show_ports=True)
# c = spiral_inner_io_euler(width=1)
# from pp.routing import add_fiber_array
# c = spiral_inner_io_euler(length_spiral=4, width=1)
# cc = pp.routing.add_fiber_array(c)
# print(c.length_spiral)
# print(get_straight_length(2, spiral_inner_io_euler))
# print(get_straight_length(4, spiral_inner_io_euler))
# print(get_straight_length(6, spiral_inner_io_euler))
# c = spiral_inner_io()
# c = spiral_inner_io_euler(y_straight_inner_top=-11)
# c = spiral_inner_io_euler(bend_radius=20, width=0.2)
# c = spiral_inner_io_euler(bend_radius=20, width=0.2, y_straight_inner_top=200)
# c = reticle_mockup()
# c = spiral_inner_io()
# c = spiral_inner_io(bend_radius=20, width=0.2)
# c = spirals_nested()
| mit |
Varriount/Colliberation | libs/construct/lib/container.py | 5 | 6148 | """
Various containers.
"""
def recursion_lock(retval, lock_name = "__recursion_lock__"):
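    """
    Decorator factory that guards a method against runaway recursion.

    While the wrapped method runs, the attribute named by ``lock_name`` is set
    on the instance; a re-entrant call sees the flag and returns ``retval``
    instead of recursing (used by the __pretty_str__ methods below to handle
    self-referential containers).
    """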
def decorator(func):
def wrapper(self, *args, **kw):
if getattr(self, lock_name, False):
return retval
setattr(self, lock_name, True)
try:
return func(self, *args, **kw)
finally:
setattr(self, lock_name, False)
wrapper.__name__ = func.__name__
return wrapper
return decorator
class Container(dict):
"""
A generic container of attributes.
Containers are the common way to express parsed data.
"""
__slots__ = ["__keys_order__"]
def __init__(self, **kw):
object.__setattr__(self, "__keys_order__", [])
for k, v in kw.items():
self[k] = v
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setitem__(self, key, val):
if key not in self:
self.__keys_order__.append(key)
dict.__setitem__(self, key, val)
def __delitem__(self, key):
dict.__delitem__(self, key)
self.__keys_order__.remove(key)
__delattr__ = __delitem__
__setattr__ = __setitem__
def clear(self):
dict.clear(self)
del self.__keys_order__[:]
def pop(self, key, *default):
val = dict.pop(self, key, *default)
self.__keys_order__.remove(key)
return val
def popitem(self):
k, v = dict.popitem(self)
self.__keys_order__.remove(k)
return k, v
def update(self, seq, **kw):
if hasattr(seq, "keys"):
for k in seq.keys():
self[k] = seq[k]
else:
for k, v in seq:
self[k] = v
dict.update(self, kw)
def copy(self):
inst = self.__class__()
inst.update(self.iteritems())
return inst
__update__ = update
__copy__ = copy
def __iter__(self):
return iter(self.__keys_order__)
iterkeys = __iter__
def itervalues(self):
return (self[k] for k in self.__keys_order__)
def iteritems(self):
return ((k, self[k]) for k in self.__keys_order__)
def keys(self):
return self.__keys_order__
def values(self):
return list(self.itervalues())
def items(self):
return list(self.iteritems())
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
@recursion_lock("<...>")
def __pretty_str__(self, nesting = 1, indentation = " "):
attrs = []
ind = indentation * nesting
for k, v in self.iteritems():
if not k.startswith("_"):
text = [ind, k, " = "]
if hasattr(v, "__pretty_str__"):
text.append(v.__pretty_str__(nesting + 1, indentation))
else:
text.append(repr(v))
attrs.append("".join(text))
if not attrs:
return "%s()" % (self.__class__.__name__,)
attrs.insert(0, self.__class__.__name__ + ":")
return "\n".join(attrs)
__str__ = __pretty_str__
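# Illustrative usage sketch (not part of the original library): shows that parsed
# fields are reachable both as keys and as attributes, and that insertion order is
# preserved by keys(). The field names below are arbitrary examples.
def _container_usage_example():
    c = Container(first=1)
    c.second = 2                              # attribute writes go through __setitem__
    assert c["second"] == c.second == 2       # key and attribute access are equivalent
    assert c.keys() == ["first", "second"]    # insertion order is preserved
    return c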
class FlagsContainer(Container):
"""
A container providing pretty-printing for flags.
Only set flags are displayed.
"""
@recursion_lock("<...>")
def __pretty_str__(self, nesting = 1, indentation = " "):
attrs = []
ind = indentation * nesting
for k in self.keys():
            v = self[k]  # values are stored in the dict itself, not in __dict__
if not k.startswith("_") and v:
attrs.append(ind + k)
if not attrs:
return "%s()" % (self.__class__.__name__,)
attrs.insert(0, self.__class__.__name__+ ":")
return "\n".join(attrs)
class ListContainer(list):
"""
A container for lists.
"""
__slots__ = ["__recursion_lock__"]
def __str__(self):
return self.__pretty_str__()
@recursion_lock("[...]")
def __pretty_str__(self, nesting = 1, indentation = " "):
if not self:
return "[]"
ind = indentation * nesting
lines = ["["]
for elem in self:
lines.append("\n")
lines.append(ind)
if hasattr(elem, "__pretty_str__"):
lines.append(elem.__pretty_str__(nesting + 1, indentation))
else:
lines.append(repr(elem))
lines.append("\n")
lines.append(indentation * (nesting - 1))
lines.append("]")
return "".join(lines)
class LazyContainer(object):
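    """
    A placeholder for a value that has not been parsed yet.

    Stores the subconstruct, stream, position and context at construction time
    and defers the actual parse until ``value`` is first accessed; the parsed
    result is then cached in ``_value``.
    """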
__slots__ = ["subcon", "stream", "pos", "context", "_value"]
def __init__(self, subcon, stream, pos, context):
self.subcon = subcon
self.stream = stream
self.pos = pos
self.context = context
self._value = NotImplemented
def __eq__(self, other):
try:
return self._value == other._value
except AttributeError:
return False
def __ne__(self, other):
return not (self == other)
def __str__(self):
return self.__pretty_str__()
def __pretty_str__(self, nesting = 1, indentation = " "):
if self._value is NotImplemented:
text = "<unread>"
elif hasattr(self._value, "__pretty_str__"):
text = self._value.__pretty_str__(nesting, indentation)
else:
text = str(self._value)
return "%s: %s" % (self.__class__.__name__, text)
def read(self):
self.stream.seek(self.pos)
return self.subcon._parse(self.stream, self.context)
def dispose(self):
self.subcon = None
self.stream = None
self.context = None
self.pos = None
def _get_value(self):
if self._value is NotImplemented:
self._value = self.read()
return self._value
value = property(_get_value)
has_value = property(lambda self: self._value is not NotImplemented)
if __name__ == "__main__":
c = Container(x=5)
c.y = 8
c.z = 9
c.w = 10
c.foo = 5
    print(c)
| mit |
alexryndin/ambari | ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py | 3 | 3758 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
from resource_management import *
class ECSClient(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
self.setup_config(env)
self.setup_hadoop_env(env)
def createdirs(self, env):
self.create_dirs(env)
def status(self, env):
raise ClientComponentHasNoStatus()
def setup_config(self, env):
import params
env.set_params(params)
stackversion = params.stack_version_unformatted
XmlConfig("core-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
group=params.user_group,
only_if=format("ls {hadoop_conf_dir}"))
XmlConfig("hdfs-site.xml",
conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['hdfs-site'],
configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
owner=params.hdfs_user,
group=params.user_group,
only_if=format("ls {hadoop_conf_dir}"))
File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
mode=0644,
content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
)
def setup_hadoop_env(self, env):
import params
env.set_params(params)
stackversion = params.stack_version_unformatted
if params.security_enabled:
tc_owner = "root"
else:
tc_owner = params.hdfs_user
# create /etc/hadoop
Directory(params.hadoop_dir, mode=0755)
# write out hadoop-env.sh, but only if the directory exists
if os.path.exists(params.hadoop_conf_dir):
File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
group=params.user_group,
content=InlineTemplate(params.hadoop_env_sh_template))
# Create tmp dir for java.io.tmpdir
# Handle a situation when /tmp is set to noexec
Directory(params.hadoop_java_io_tmpdir,
owner=params.hdfs_user,
group=params.user_group,
mode=0777
)
def create_dirs(self,env):
import params
env.set_params(params)
params.HdfsResource(params.hdfs_tmp_dir,
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
mode=0777
)
params.HdfsResource(params.smoke_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.smoke_user,
mode=params.smoke_hdfs_user_mode
)
params.HdfsResource(None,
action="execute"
)
if __name__ == "__main__":
ECSClient().execute()
| apache-2.0 |
Revanth47/addons-server | src/olympia/api/tests/test_commands.py | 5 | 4643 | import os.path
from StringIO import StringIO
from django.core.management import call_command
from django.conf import settings
from olympia.amo.tests import TestCase, user_factory
from olympia.api.models import APIKey
class TestRevokeAPIKeys(TestCase):
def setUp(self):
self.csv_path = os.path.join(
settings.ROOT, 'src', 'olympia', 'api', 'tests', 'assets',
'test-revoke-api-keys.csv')
def test_api_key_does_not_exist(self):
user = user_factory()
# The test csv does not contain an entry for this user.
apikey = APIKey.new_jwt_credentials(user=user)
old_secret = apikey.secret
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, it does not exist.\n')
assert output[1] == (
'Ignoring APIKey user:67890:333, it does not exist.\n')
# APIKey is still active, secret hasn't changed, there are no
# additional APIKeys.
apikey.reload()
assert apikey.secret == old_secret
assert apikey.is_active
assert APIKey.objects.filter(user=user).count() == 1
def test_api_key_already_inactive(self):
user = user_factory(id=67890)
# The test csv contains an entry with this user and the "right" secret.
right_secret = (
'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26')
apikey = APIKey.objects.create(
key='user:{}:{}'.format(user.pk, '333'), secret=right_secret,
user=user, is_active=False) # inactive APIKey.
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, it does not exist.\n')
assert output[1] == (
'Ignoring APIKey user:67890:333, it does not exist.\n')
        # APIKey is still inactive, its secret hasn't changed, and there are no
        # additional APIKeys.
apikey.reload()
assert apikey.secret == right_secret
assert not apikey.is_active
assert APIKey.objects.filter(user=user).count() == 1
def test_api_key_has_wrong_secret(self):
user = user_factory(id=12345)
# The test csv contains an entry with this user and the "wrong" secret.
right_secret = (
'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26')
apikey = APIKey.objects.create(
key='user:{}:{}'.format(user.pk, '666'), secret=right_secret,
user=user, is_active=True)
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, secret differs.\n')
assert output[1] == (
'Ignoring APIKey user:67890:333, it does not exist.\n')
# APIKey is still active, secret hasn't changed, there are no
# additional APIKeys.
apikey.reload()
assert apikey.secret == right_secret
assert apikey.is_active
assert APIKey.objects.filter(user=user).count() == 1
def test_api_key_should_be_revoked(self):
user = user_factory(id=67890)
# The test csv contains an entry with this user and the "right" secret.
right_secret = (
'ab2228544a061cb2af21af97f637cc58e1f8340196f1ddc3de329b5974694b26')
apikey = APIKey.objects.create(
key='user:{}:{}'.format(user.pk, '333'), secret=right_secret,
user=user, is_active=True)
stdout = StringIO()
call_command('revoke_api_keys', self.csv_path, stdout=stdout)
stdout.seek(0)
output = stdout.readlines()
assert output[0] == (
'Ignoring APIKey user:12345:666, it does not exist.\n')
assert output[1] == (
'Revoked APIKey user:67890:333.\n')
assert output[2] == (
'Ignoring APIKey garbage, it does not exist.\n')
assert output[3] == (
'Done. Revoked 1 keys out of 3 entries.\n')
        # APIKey is now inactive, its secret hasn't changed, and a new active
        # replacement APIKey has been generated.
apikey.reload()
assert apikey.secret == right_secret
assert not apikey.is_active
assert APIKey.objects.filter(user=user).count() == 2
assert APIKey.objects.filter(user=user, is_active=True).count() == 1
| bsd-3-clause |
edx/ecommerce | ecommerce/extensions/analytics/migrations/0001_initial.py | 1 | 3904 | # -*- coding: utf-8 -*-
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProductRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('num_views', models.PositiveIntegerField(default=0, verbose_name='Views')),
('num_basket_additions', models.PositiveIntegerField(default=0, verbose_name='Basket Additions')),
('num_purchases', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Purchases')),
('score', models.FloatField(default=0.0, verbose_name='Score')),
],
options={
'ordering': ['-num_purchases'],
'verbose_name_plural': 'Product records',
'verbose_name': 'Product record',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserProductView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
],
options={
'verbose_name_plural': 'User product views',
'verbose_name': 'User product view',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('num_product_views', models.PositiveIntegerField(default=0, verbose_name='Product Views')),
('num_basket_additions', models.PositiveIntegerField(default=0, verbose_name='Basket Additions')),
('num_orders', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Orders')),
('num_order_lines', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Order Lines')),
('num_order_items', models.PositiveIntegerField(default=0, db_index=True, verbose_name='Order Items')),
('total_spent', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total Spent')),
('date_last_order', models.DateTimeField(blank=True, verbose_name='Last Order Date', null=True)),
('user', models.OneToOneField(verbose_name='User', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'User records',
'verbose_name': 'User record',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserSearch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('query', models.CharField(max_length=255, db_index=True, verbose_name='Search term')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('user', models.ForeignKey(verbose_name='User', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'verbose_name_plural': 'User search queries',
'verbose_name': 'User search query',
'abstract': False,
},
bases=(models.Model,),
),
]
| agpl-3.0 |