# Source file: felixfontein/ansible, test/support/integration/plugins/modules/postgresql_db.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_db
short_description: Add or remove PostgreSQL databases from a remote host.
description:
- Add or remove PostgreSQL databases from a remote host.
version_added: '0.6'
options:
name:
description:
- Name of the database to add or remove
type: str
required: true
aliases: [ db ]
port:
description:
- Database port to connect (if needed)
type: int
default: 5432
aliases:
- login_port
owner:
description:
- Name of the role to set as owner of the database
type: str
template:
description:
- Template used to create the database
type: str
encoding:
description:
- Encoding of the database
type: str
lc_collate:
description:
- Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
type: str
lc_ctype:
description:
- Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
is used as template.
type: str
session_role:
description:
- Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
version_added: '2.8'
state:
description:
- The database state.
- C(present) implies that the database should be created if necessary.
- C(absent) implies that the database should be removed if present.
- C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
      Note that some versions of pg_dump (an embedded PostgreSQL utility used by this module)
      return rc 0 even when errors occur (for example, when the connection is forbidden by pg_hba.conf),
      so the module reports changed=True although the dump has not actually been done. Please make sure
      that your version of pg_dump returns rc 1 in this case.
- C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
- The format of the backup will be detected based on the target name.
- Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
- Supported formats for dump and restore include C(.sql) and C(.tar)
type: str
choices: [ absent, dump, present, restore ]
default: present
target:
description:
- File to back up or restore from.
- Used when I(state) is C(dump) or C(restore).
type: path
version_added: '2.4'
target_opts:
description:
- Further arguments for pg_dump or pg_restore.
- Used when I(state) is C(dump) or C(restore).
type: str
version_added: '2.4'
maintenance_db:
description:
    - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
type: str
default: postgres
version_added: '2.5'
conn_limit:
description:
- Specifies the database connection limit.
type: str
version_added: '2.8'
tablespace:
description:
- The tablespace to set for the database
U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
- If you want to move the database back to the default tablespace,
explicitly set this to pg_default.
type: path
version_added: '2.9'
dump_extra_args:
description:
- Provides additional arguments when I(state) is C(dump).
- Cannot be used with dump-file-format-related arguments like ``--format=d``.
type: str
version_added: '2.10'
seealso:
- name: CREATE DATABASE reference
description: Complete reference of the CREATE DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-createdatabase.html
- name: DROP DATABASE reference
description: Complete reference of the DROP DATABASE command documentation.
link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
- name: pg_dump reference
description: Complete reference of pg_dump documentation.
link: https://www.postgresql.org/docs/current/app-pgdump.html
- name: pg_restore reference
description: Complete reference of pg_restore documentation.
link: https://www.postgresql.org/docs/current/app-pgrestore.html
- module: postgresql_tablespace
- module: postgresql_info
- module: postgresql_ping
notes:
- States C(dump) and C(restore) don't require I(psycopg2) since Ansible 2.8.
author: "Ansible Core Team"
extends_documentation_fragment:
- postgres
'''
EXAMPLES = r'''
- name: Create a new database with name "acme"
postgresql_db:
name: acme
# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
- name: Create a new database with name "acme" and specific encoding and locale # settings.
postgresql_db:
name: acme
encoding: UTF-8
lc_collate: de_DE.UTF-8
lc_ctype: de_DE.UTF-8
template: template0
# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
postgresql_db:
name: acme
conn_limit: "100"
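# Note: Illustrative example; the owner role "appuser" is a placeholder and must already exist.
- name: Create a new database with name "acme" owned by the role "appuser"
  postgresql_db:
    name: acme
    owner: appuser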
- name: Dump an existing database to a file
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
- name: Dump an existing database to a file excluding the test table
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
dump_extra_args: --exclude-table=test
- name: Dump an existing database to a file (with compression)
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql.gz
- name: Dump a single schema for an existing database
postgresql_db:
name: acme
state: dump
target: /tmp/acme.sql
target_opts: "-n public"
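# Note: Illustrative restore example; the dump file /tmp/acme.sql.gz is assumed to already exist.
# The compression format is detected from the target file extension.
- name: Restore database "acme" from a compressed dump file
  postgresql_db:
    name: acme
    state: restore
    target: /tmp/acme.sql.gz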
# Note: In the example below, if database foo exists and has another tablespace
# the tablespace will be changed to foo. Access to the database will be locked
# until the copying of database files is finished.
- name: Create a new database called foo in tablespace bar
postgresql_db:
name: foo
tablespace: bar
'''
RETURN = r'''
executed_commands:
  description: List of commands that the module tried to run.
returned: always
type: list
sample: ["CREATE DATABASE acme"]
version_added: '2.10'
'''
import os
import subprocess
import traceback
try:
import psycopg2
import psycopg2.extras
except ImportError:
HAS_PSYCOPG2 = False
else:
HAS_PSYCOPG2 = True
import ansible.module_utils.postgres as pgutils
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_native
executed_commands = []
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def set_owner(cursor, db, owner):
query = 'ALTER DATABASE %s OWNER TO "%s"' % (
pg_quote_identifier(db, 'database'),
owner)
executed_commands.append(query)
cursor.execute(query)
return True
def set_conn_limit(cursor, db, conn_limit):
query = "ALTER DATABASE %s CONNECTION LIMIT %s" % (
pg_quote_identifier(db, 'database'),
conn_limit)
executed_commands.append(query)
cursor.execute(query)
return True
def get_encoding_id(cursor, encoding):
query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
cursor.execute(query, {'encoding': encoding})
return cursor.fetchone()['encoding_id']
def get_db_info(cursor, db):
query = """
SELECT rolname AS owner,
pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
spcname AS tablespace
FROM pg_database
JOIN pg_roles ON pg_roles.oid = pg_database.datdba
JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
WHERE datname = %(db)s
"""
cursor.execute(query, {'db': db})
return cursor.fetchone()
def db_exists(cursor, db):
query = "SELECT * FROM pg_database WHERE datname=%(db)s"
cursor.execute(query, {'db': db})
return cursor.rowcount == 1
def db_delete(cursor, db):
if db_exists(cursor, db):
query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
executed_commands.append(query)
cursor.execute(query)
return True
else:
return False
def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
if not db_exists(cursor, db):
query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
if owner:
query_fragments.append('OWNER "%s"' % owner)
if template:
query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
if encoding:
query_fragments.append('ENCODING %(enc)s')
if lc_collate:
query_fragments.append('LC_COLLATE %(collate)s')
if lc_ctype:
query_fragments.append('LC_CTYPE %(ctype)s')
if tablespace:
query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace'))
if conn_limit:
query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
query = ' '.join(query_fragments)
executed_commands.append(cursor.mogrify(query, params))
cursor.execute(query, params)
return True
else:
db_info = get_db_info(cursor, db)
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
raise NotSupportedError(
'Changing database encoding is not supported. '
'Current encoding: %s' % db_info['encoding']
)
elif lc_collate and lc_collate != db_info['lc_collate']:
raise NotSupportedError(
'Changing LC_COLLATE is not supported. '
'Current LC_COLLATE: %s' % db_info['lc_collate']
)
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
raise NotSupportedError(
                'Changing LC_CTYPE is not supported. '
'Current LC_CTYPE: %s' % db_info['lc_ctype']
)
else:
changed = False
if owner and owner != db_info['owner']:
changed = set_owner(cursor, db, owner)
if conn_limit and conn_limit != str(db_info['conn_limit']):
changed = set_conn_limit(cursor, db, conn_limit)
if tablespace and tablespace != db_info['tablespace']:
changed = set_tablespace(cursor, db, tablespace)
return changed
def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
if not db_exists(cursor, db):
return False
else:
db_info = get_db_info(cursor, db)
if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
return False
elif lc_collate and lc_collate != db_info['lc_collate']:
return False
elif lc_ctype and lc_ctype != db_info['lc_ctype']:
return False
elif owner and owner != db_info['owner']:
return False
elif conn_limit and conn_limit != str(db_info['conn_limit']):
return False
elif tablespace and tablespace != db_info['tablespace']:
return False
else:
return True
def db_dump(module, target, target_opts="",
db=None,
dump_extra_args=None,
user=None,
password=None,
host=None,
port=None,
**kw):
flags = login_flags(db, host, port, user, db_prefix=False)
cmd = module.get_bin_path('pg_dump', True)
comp_prog_path = None
if os.path.splitext(target)[-1] == '.tar':
flags.append(' --format=t')
elif os.path.splitext(target)[-1] == '.pgc':
flags.append(' --format=c')
if os.path.splitext(target)[-1] == '.gz':
if module.get_bin_path('pigz'):
comp_prog_path = module.get_bin_path('pigz', True)
else:
comp_prog_path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xz', True)
cmd += "".join(flags)
if dump_extra_args:
cmd += " {0} ".format(dump_extra_args)
if target_opts:
cmd += " {0} ".format(target_opts)
if comp_prog_path:
# Use a fifo to be notified of an error in pg_dump
# Using shell pipe has no way to return the code of the first command
# in a portable way.
fifo = os.path.join(module.tmpdir, 'pg_fifo')
os.mkfifo(fifo)
cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
else:
cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
return do_with_password(module, cmd, password)
def db_restore(module, target, target_opts="",
db=None,
user=None,
password=None,
host=None,
port=None,
**kw):
flags = login_flags(db, host, port, user)
comp_prog_path = None
cmd = module.get_bin_path('psql', True)
if os.path.splitext(target)[-1] == '.sql':
flags.append(' --file={0}'.format(target))
elif os.path.splitext(target)[-1] == '.tar':
flags.append(' --format=Tar')
cmd = module.get_bin_path('pg_restore', True)
elif os.path.splitext(target)[-1] == '.pgc':
flags.append(' --format=Custom')
cmd = module.get_bin_path('pg_restore', True)
elif os.path.splitext(target)[-1] == '.gz':
comp_prog_path = module.get_bin_path('zcat', True)
elif os.path.splitext(target)[-1] == '.bz2':
comp_prog_path = module.get_bin_path('bzcat', True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xzcat', True)
cmd += "".join(flags)
if target_opts:
cmd += " {0} ".format(target_opts)
if comp_prog_path:
env = os.environ.copy()
if password:
env = {"PGPASSWORD": password}
p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
(stdout2, stderr2) = p2.communicate()
p1.stdout.close()
p1.wait()
if p1.returncode != 0:
stderr1 = p1.stderr.read()
return p1.returncode, '', stderr1, 'cmd: ****'
else:
return p2.returncode, '', stderr2, 'cmd: ****'
else:
cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
return do_with_password(module, cmd, password)
def login_flags(db, host, port, user, db_prefix=True):
"""
    Return a list of connection argument strings, each prefixed with a
    space and quoted where necessary, to later be combined in a single
    shell string with `"".join(rv)`.
    db_prefix determines whether "--dbname" is prepended to the db
    argument, since that option was introduced in PostgreSQL 9.3.
"""
flags = []
if db:
if db_prefix:
flags.append(' --dbname={0}'.format(shlex_quote(db)))
else:
flags.append(' {0}'.format(shlex_quote(db)))
if host:
flags.append(' --host={0}'.format(host))
if port:
flags.append(' --port={0}'.format(port))
if user:
flags.append(' --username={0}'.format(user))
return flags
def do_with_password(module, cmd, password):
env = {}
if password:
env = {"PGPASSWORD": password}
executed_commands.append(cmd)
    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
    return rc, stdout, stderr, cmd
def set_tablespace(cursor, db, tablespace):
query = "ALTER DATABASE %s SET TABLESPACE %s" % (
pg_quote_identifier(db, 'database'),
pg_quote_identifier(tablespace, 'tablespace'))
executed_commands.append(query)
cursor.execute(query)
return True
# ===========================================
# Module execution.
#
def main():
argument_spec = pgutils.postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', required=True, aliases=['name']),
owner=dict(type='str', default=''),
template=dict(type='str', default=''),
encoding=dict(type='str', default=''),
lc_collate=dict(type='str', default=''),
lc_ctype=dict(type='str', default=''),
state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
target=dict(type='path', default=''),
target_opts=dict(type='str', default=''),
maintenance_db=dict(type='str', default="postgres"),
session_role=dict(type='str'),
conn_limit=dict(type='str', default=''),
tablespace=dict(type='path', default=''),
dump_extra_args=dict(type='str', default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
db = module.params["db"]
owner = module.params["owner"]
template = module.params["template"]
encoding = module.params["encoding"]
lc_collate = module.params["lc_collate"]
lc_ctype = module.params["lc_ctype"]
target = module.params["target"]
target_opts = module.params["target_opts"]
state = module.params["state"]
changed = False
maintenance_db = module.params['maintenance_db']
session_role = module.params["session_role"]
conn_limit = module.params['conn_limit']
tablespace = module.params['tablespace']
dump_extra_args = module.params['dump_extra_args']
raw_connection = state in ("dump", "restore")
if not raw_connection:
pgutils.ensure_required_libs(module)
    # To use default values, keyword arguments must be absent, so
    # check which values are empty and don't include them in the **kw
    # dictionary.
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != '' and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if target == "":
target = "{0}/{1}.sql".format(os.getcwd(), db)
target = os.path.expanduser(target)
if not raw_connection:
try:
db_connection = psycopg2.connect(database=maintenance_db, **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
exception=traceback.format_exc())
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
if session_role:
try:
cursor.execute('SET ROLE "%s"' % session_role)
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
try:
if module.check_mode:
if state == "absent":
changed = db_exists(cursor, db)
elif state == "present":
changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
if state == "absent":
try:
changed = db_delete(cursor, db)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state == "present":
try:
changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
elif state in ("dump", "restore"):
method = state == "dump" and db_dump or db_restore
try:
if state == 'dump':
rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
else:
rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
if rc != 0:
module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
else:
module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
executed_commands=executed_commands)
except SQLParseError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except NotSupportedError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
except SystemExit:
# Avoid catching this on Python 2.4
raise
except Exception as e:
module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
if __name__ == '__main__':
main()
# Source file: titasakgm/brc-stock, openerp/addons/note_pad/note_pad.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
class note_pad_note(osv.osv):
""" memo pad """
_name = 'note.note'
_inherit = ['pad.common','note.note']
_pad_fields = ['note_pad']
_columns = {
'note_pad_url': fields.char('Pad Url', pad_content_field='memo'),
    }
# Source file: makermade/arm_android-19_arm-linux-androideabi-4.8, lib/python2.7/mailbox.py
#! /usr/bin/env python
"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
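# Example usage (an illustrative sketch; the mailbox path below is a placeholder):
#
#   import mailbox
#   inbox = mailbox.mbox('~/mail/inbox')
#   for key, msg in inbox.iteritems():
#       print key, msg['subject']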
import sys
import os
import time
import calendar
import socket
import errno
import copy
import email
import email.message
import email.generator
import StringIO
try:
if sys.platform == 'os2emx':
# OS/2 EMX fcntl() not adequate
raise ImportError
import fcntl
except ImportError:
fcntl = None
import warnings
with warnings.catch_warnings():
if sys.py3kwarning:
warnings.filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import rfc822
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage', 'UnixMailbox',
'PortableUnixMailbox', 'MmdfMailbox', 'MHMailbox', 'BabylMailbox' ]
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
return self._factory(self.get_file(key))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __contains__(self, key):
return self.has_key(key)
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.iterkeys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
# Whether each message must end in a newline
_append_newline = False
def _dump_message(self, message, target, mangle_from_=False):
# Most files are opened in binary mode to allow predictable seeking.
# To get native line endings on disk, the user-friendly \n line endings
# used in strings and by email.Message are translated here.
"""Dump message contents to target file."""
if isinstance(message, email.message.Message):
buffer = StringIO.StringIO()
gen = email.generator.Generator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
data = buffer.read().replace('\n', os.linesep)
target.write(data)
if self._append_newline and not data.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
elif isinstance(message, str):
if mangle_from_:
message = message.replace('\nFrom ', '\n>From ')
message = message.replace('\n', os.linesep)
target.write(message)
if self._append_newline and not message.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
elif hasattr(message, 'read'):
lastline = None
while True:
line = message.readline()
if line == '':
break
if mangle_from_ and line.startswith('From '):
line = '>From ' + line[5:]
line = line.replace('\n', os.linesep)
target.write(line)
lastline = line
if self._append_newline and lastline and not lastline.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
else:
raise TypeError('Invalid message type: %s' % type(message))
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
def __init__(self, dirname, factory=rfc822.Message, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
self._paths = {
'tmp': os.path.join(self._path, 'tmp'),
'new': os.path.join(self._path, 'new'),
'cur': os.path.join(self._path, 'cur'),
}
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
for path in self._paths.values():
os.mkdir(path, 0o700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
self._toc_mtimes = {'cur': 0, 'new': 0}
self._last_read = 0 # Records last time we read cur/new
self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
except BaseException:
tmp_file.close()
os.remove(tmp_file.name)
raise
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError, e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
if isinstance(message, MaildirMessage):
os.utime(dest, (os.path.getatime(dest), message.get_date()))
return uniq
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except KeyError:
pass
except OSError, e:
if e.errno != errno.ENOENT:
raise
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
new_path = os.path.join(self._path, subdir, key + suffix)
os.rename(os.path.join(self._path, temp_subpath), new_path)
if isinstance(message, MaildirMessage):
os.utime(new_path, (os.path.getatime(new_path),
message.get_date()))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
f = open(os.path.join(self._path, subpath), 'r')
try:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
finally:
f.close()
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'r')
try:
return f.read()
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
# Maildir changes are always written immediately, so there's nothing
# to do.
pass
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
0666))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except OSError, e:
if e.errno == errno.ENOENT:
Maildir._count += 1
try:
return _create_carefully(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
def _refresh(self):
"""Update table of contents mapping."""
# If it has been less than two seconds since the last _refresh() call,
# we have to unconditionally re-read the mailbox just in case it has
        # been modified, because os.path.getmtime() has a 2 sec resolution in the
# most common worst case (FAT) and a 1 sec resolution typically. This
# results in a few unnecessary re-reads when _refresh() is called
# multiple times in that interval, but once the clock ticks over, we
# will only re-read as needed. Because the filesystem might be being
# served by an independent system with its own clock, we record and
# compare with the mtimes from the filesystem. Because the other
# system's clock might be skewing relative to our clock, we add an
# extra delta to our wait. The default is one tenth second, but is an
# instance variable and so can be adjusted if dealing with a
# particularly skewed or irregular system.
if time.time() - self._last_read > 2 + self._skewfactor:
refresh = False
for subdir in self._toc_mtimes:
mtime = os.path.getmtime(self._paths[subdir])
if mtime > self._toc_mtimes[subdir]:
refresh = True
self._toc_mtimes[subdir] = mtime
if not refresh:
return
# Refresh toc
self._toc = {}
for subdir in self._toc_mtimes:
path = self._paths[subdir]
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
self._last_read = time.time()
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[self._onetime_keys.next()]
except StopIteration:
return None
except KeyError:
continue
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno in (errno.EACCES, errno.EROFS):
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._pending_sync = False # No need to sync the file
self._locked = False
self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
# _append_message appends the message to the mailbox file. We
# don't need a full rewrite + rename, sync is enough.
self._pending_sync = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
for key in self._toc.keys():
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
if self._pending_sync:
# Messages have only been added, so syncing the file
# is enough.
_sync_flush(self._file)
self._pending_sync = False
return
# In order to be writing anything out at all, self._toc must
# already have been generated (and presumably has been modified
# by adding or deleting an item).
assert self._toc is not None
# Check length of self._file; if it's changed, some other process
# has modified the mailbox since we scanned it.
self._file.seek(0, 2)
cur_len = self._file.tell()
if cur_len != self._file_length:
raise ExternalClashError('Size of mailbox file changed '
'(expected %i, found %i)' %
(self._file_length, cur_len))
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if buffer == '':
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
self._file_length = new_file.tell()
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
# Make sure the new file's mode is the same as the old file's
mode = os.stat(self._path).st_mode
os.chmod(new_file.name, mode)
try:
os.rename(new_file.name, self._path)
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(self._path)
os.rename(new_file.name, self._path)
else:
raise
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
self._pending_sync = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
self.flush()
if self._locked:
self.unlock()
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
if len(self._toc) == 0 and not self._pending:
# This is the first message, and the _pre_mailbox_hook
# hasn't yet been called. If self._pending is True,
# messages have been removed, so _pre_mailbox_hook must
# have been called already.
self._pre_mailbox_hook(self._file)
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
except BaseException:
self._file.truncate(before)
raise
self._file.flush()
self._file_length = self._file.tell() # Record current length of mailbox
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(os.linesep, '')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(os.linesep, '\n'))
msg.set_from(from_line[5:])
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(os.linesep, '\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str) and message.startswith('From '):
newline = message.find('\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = ''
elif isinstance(message, _mboxMMDFMessage):
from_line = 'From ' + message.get_from()
elif isinstance(message, email.message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is None:
from_line = 'From MAILER-DAEMON %s' % time.asctime(time.gmtime())
start = self._file.tell()
self._file.write(from_line + os.linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
# All messages must end in a newline character, and
# _post_message_hooks outputs an empty line between messages.
_append_newline = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
last_was_empty = False
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith('From '):
if len(stops) < len(starts):
if last_was_empty:
stops.append(line_pos - len(os.linesep))
else:
# The last line before the "From " line wasn't
# blank, but we consider it a start of a
# message anyway.
stops.append(line_pos)
starts.append(line_pos)
last_was_empty = False
elif not line:
if last_was_empty:
stops.append(line_pos - len(os.linesep))
else:
stops.append(line_pos)
break
elif line == os.linesep:
last_was_empty = True
else:
last_was_empty = False
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\001\001\001\001' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\001\001\001\001' + os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith('\001\001\001\001' + os.linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\001\001\001\001' + os.linesep:
stops.append(line_pos - len(os.linesep))
break
elif line == '':
stops.append(line_pos)
break
elif line == '':
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
closed = False
try:
if self._locked:
_lock_file(f)
try:
try:
self._dump_message(message, f)
except BaseException:
# Unlock and close so it can be deleted on Windows
if self._locked:
_unlock_file(f)
_sync_close(f)
closed = True
os.remove(new_path)
raise
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
if not closed:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
else:
f.close()
os.remove(path)
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
for name, key_list in self.get_sequences().iteritems():
if key in key_list:
msg.add_sequence(name)
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
return f.read()
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
raise NotEmptyError('Folder not empty: %s' % self._path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
f = open(os.path.join(self._path, '.mh_sequences'), 'r')
try:
all_keys = set(self.keys())
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
results[name] = [key for key in sorted(keys) \
if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
finally:
f.close()
return results
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.iteritems():
if len(keys) == 0:
continue
f.write('%s:' % name)
prev = None
completing = False
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.iteritems():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset(('unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'))
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
visible_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
visible_headers.write(line.replace(os.linesep, '\n'))
body = self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
return original_headers.getvalue() + \
self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self.get_string(key).replace('\n',
os.linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\037\014' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(',')
if label.strip() != '']
label_lists.append(labels)
elif line == '\037' or line == '\037' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
elif line == '':
stops.append(line_pos - len(os.linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
f.write('BABYL OPTIONS:%sVersion: 5%sLabels:%s%s\037' %
(os.linesep, os.linesep, ','.join(self.get_labels()),
os.linesep))
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\014' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\037')
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write('1')
for label in special_labels:
self._file.write(', ' + label)
self._file.write(',,')
for label in labels:
self._file.write(' ' + label + ',')
self._file.write(os.linesep)
else:
self._file.write('1,,' + os.linesep)
if isinstance(message, email.message.Message):
orig_buffer = StringIO.StringIO()
orig_generator = email.generator.Generator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
self._file.write('*** EOOH ***' + os.linesep)
if isinstance(message, BabylMessage):
vis_buffer = StringIO.StringIO()
vis_generator = email.generator.Generator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
elif isinstance(message, str):
body_start = message.find('\n\n') + 2
if body_start - 2 != -1:
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write('*** EOOH ***' + os.linesep)
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write(message[body_start:].replace('\n',
os.linesep))
else:
self._file.write('*** EOOH ***' + os.linesep + os.linesep)
self._file.write(message.replace('\n', os.linesep))
elif hasattr(message, 'readline'):
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
if first_pass:
first_pass = False
self._file.write('*** EOOH ***' + os.linesep)
message.seek(original_pos)
else:
break
while True:
buffer = message.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
class Message(email.message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_file(message))
elif message is None:
email.message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
for name in ('_headers', '_unixfrom', '_payload', '_charset',
'preamble', 'epilogue', 'defects', '_default_type'):
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags() != '':
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
if not sequence in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence must be a string: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
def __init__(self, message=None):
"""Initialize an BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
return iter(self.readline, "")
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
if hasattr(self, '_file'):
if hasattr(self._file, 'close'):
self._file.close()
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return ''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
def close(self):
# do *not* close the underlying file object for partial files,
# since it's global to the mailbox object
if hasattr(self, '_file'):
del self._file
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except IOError, e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
else:
raise
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0666)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
## Start: classes from the original module (for backward compatibility).
# Note that the Maildir class, whose name is unchanged, itself offers a next()
# method for backward compatibility.
class _Mailbox:
def __init__(self, fp, factory=rfc822.Message):
self.fp = fp
self.seekp = 0
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
while 1:
self.fp.seek(self.seekp)
try:
self._search_start()
except EOFError:
self.seekp = self.fp.tell()
return None
start = self.fp.tell()
self._search_end()
self.seekp = stop = self.fp.tell()
if start != stop:
break
return self.factory(_PartialFile(self.fp, start, stop))
# Recommended to use PortableUnixMailbox instead!
class UnixMailbox(_Mailbox):
def _search_start(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
def _search_end(self):
self.fp.readline() # Throw away header line
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
# An overridable mechanism to test for From-line-ness. You can either
# specify a different regular expression or define a whole new
# _isrealfromline() method. Note that this only gets called for lines
# starting with the 5 characters "From ".
#
# BAW: According to
#http://home.netscape.com/eng/mozilla/2.0/relnotes/demo/content-length.html
# the only portable, reliable way to find message delimiters in a BSD (i.e
# Unix mailbox) style folder is to search for "\n\nFrom .*\n", or at the
# beginning of the file, "^From .*\n". While _fromlinepattern below seems
# like a good idea, in practice, there are too many variations for more
# strict parsing of the line to be completely accurate.
#
# _strict_isrealfromline() is the old version which tries to do stricter
# parsing of the From_ line. _portable_isrealfromline() simply returns
# true, since it's never called if the line doesn't already start with
# "From ".
#
# This algorithm, and the way it interacts with _search_start() and
# _search_end() may not be completely correct, because it doesn't check
# that the two characters preceding "From " are \n\n or the beginning of
# the file. Fixing this would require a more extensive rewrite than is
# necessary. For convenience, we've added a PortableUnixMailbox class
# which does no checking of the format of the 'From' line.
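    # Illustrative example (not from the original source): a line such as
    #   "From [email protected] Sat Jan  3 01:05:34 1996"
    # satisfies _fromlinepattern below, whereas _portable_isrealfromline()
    # accepts any line that already starts with "From ".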
_fromlinepattern = (r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+"
r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*"
r"[^\s]*\s*"
"$")
_regexp = None
def _strict_isrealfromline(self, line):
if not self._regexp:
import re
self._regexp = re.compile(self._fromlinepattern)
return self._regexp.match(line)
def _portable_isrealfromline(self, line):
return True
_isrealfromline = _strict_isrealfromline
class PortableUnixMailbox(UnixMailbox):
_isrealfromline = UnixMailbox._portable_isrealfromline
class MmdfMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == '\001\001\001\001\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\001\001\001\001\n':
self.fp.seek(pos)
return
class MHMailbox:
def __init__(self, dirname, factory=rfc822.Message):
import re
pat = re.compile('^[1-9][0-9]*$')
self.dirname = dirname
# the three following lines could be combined into:
# list = map(long, filter(pat.match, os.listdir(self.dirname)))
list = os.listdir(self.dirname)
list = filter(pat.match, list)
list = map(long, list)
list.sort()
# This only works in Python 1.6 or later;
# before that str() added 'L':
self.boxes = map(str, list)
self.boxes.reverse()
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
if not self.boxes:
return None
fn = self.boxes.pop()
fp = open(os.path.join(self.dirname, fn))
msg = self.factory(fp)
try:
msg._mh_msgno = fn
except (AttributeError, TypeError):
pass
return msg
class BabylMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line == '*** EOOH ***\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\037\014\n' or line == '\037':
self.fp.seek(pos)
return
## End: classes from the original module (for backward compatibility).
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
| gpl-2.0 | 3,640,532,062,541,751,000 | -718,736,440,796,366,100 | 35.211211 | 87 | 0.52465 | false |
DirkWilhelmi/mikro_tms | rpi_functions.py | 1 | 1501 | import ConfigParser
from r515.basic_functions import BasicFunctions
from r515.basic_settings import BasicSettings
from r515.connection import Connection
config = ConfigParser.ConfigParser()
config.read(['r515.cfg'])
conn = Connection(config.get('Connection', 'IP'), config.get('Connection', 'USR'), config.get('Connection', 'PWD'))
prj = BasicFunctions(conn)
settings = BasicSettings(conn)
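# The argument-free functions below are thin wrappers around the projector
# connection created above (lamp power, playback, input formats, douser,
# zoom/focus), presumably so they can be bound directly to GPIO buttons.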
def lamp_on():
prj.power_on()
def lamp_off():
prj.power_standby()
def lamp_eighty():
settings.set_lamp_power(80.0)
def lamp_hundred():
settings.set_lamp_power(100.0)
def stop():
prj.stop()
prj.close_douser()
def pause():
prj.pause()
def play():
prj.play()
def dci_flat():
settings.load_format(1)
def dci_cs():
settings.load_format(2)
def hdmi_flat():
settings.load_format(3)
def hdmi_cs():
settings.load_format(4)
def dowser_close():
prj.close_douser()
def dowser_open():
prj.open_douser()
def start_zoom_positive():
settings.start_zoom()
def stop_zoom_positive():
settings.stop_zoom()
def start_zoom_down():
settings.start_zoom('down')
def stop_zoom_down():
settings.stop_zoom('down')
def start_focus_positive():
settings.start_focus()
def stop_focus_positive():
settings.stop_focus()
def start_focus_down():
settings.start_focus('down')
def stop_focus_down():
settings.stop_focus('down')
def is_hdmi():
return settings.get_projector_settings()['media_block'] == 'UNKNOWN' | gpl-2.0 | -4,346,397,317,189,922,000 | 2,490,181,461,457,848,000 | 14.02 | 115 | 0.67022 | false |
skoslowski/gnuradio | gr-blocks/python/blocks/qa_logger.py | 4 | 3225 | #!/usr/bin/env python
#
# Copyright 2016 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
class test_logger (gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def set_and_assert_log_level(self, block, level):
block.set_log_level(level)
self.assertEqual(block.log_level(), level)
def test_log_level_for_block(self):
        # Test the python API for getting and setting log levels of an individual block
# go through all of the documented log_levels
ns = blocks.null_source(1)
self.set_and_assert_log_level(ns, "notset")
self.set_and_assert_log_level(ns, "debug")
self.set_and_assert_log_level(ns, "info")
self.set_and_assert_log_level(ns, "notice")
self.set_and_assert_log_level(ns, "warn")
self.set_and_assert_log_level(ns, "error")
self.set_and_assert_log_level(ns, "crit")
self.set_and_assert_log_level(ns, "alert")
self.set_and_assert_log_level(ns, "emerg")
        # There are a couple of special cases. "off" == "notset" (specific to gr)
# and "fatal" == "emerg" (built in to log4cpp)
ns.set_log_level("off")
self.assertEqual(ns.log_level(), "notset")
ns.set_log_level("fatal")
self.assertEqual(ns.log_level(), "emerg")
        # Make sure an exception is thrown on bogus data
self.assertRaises(RuntimeError, ns.set_log_level, "11")
def test_log_level_for_tb(self):
# Test the python API for getting and setting log levels for a top_block
nsrc = blocks.null_source(4)
nsnk = blocks.null_sink(4)
# Set all log levels to a known state
nsrc.set_log_level("debug")
nsnk.set_log_level("debug")
tb = gr.top_block()
tb.connect(nsrc, nsnk)
# confirm that the tb has log_level of first block
self.assertEqual(tb.log_level(), "debug")
# confirm that changing tb log_level propagates to connected blocks
tb.set_log_level("alert")
self.assertEqual(tb.log_level(), "alert")
self.assertEqual(nsrc.log_level(), "alert")
self.assertEqual(nsnk.log_level(), "alert")
def test_log_level_for_hier_block(self):
# Test the python API for getting and setting log levels for hier blocks
nsrc = blocks.null_source(4)
nsnk = blocks.null_sink(4)
b = blocks.stream_to_vector_decimator(4, 1, 1, 1) # just a random hier block that exists
tb = gr.top_block()
tb.connect(nsrc, b, nsnk)
tb.set_log_level("debug")
self.assertEqual(tb.log_level(), "debug")
self.assertEqual(nsrc.log_level(), "debug")
self.assertEqual(nsnk.log_level(), "debug")
self.assertEqual(b.one_in_n.log_level(), "debug")
tb.set_log_level("alert")
self.assertEqual(tb.log_level(), "alert")
self.assertEqual(nsrc.log_level(), "alert")
self.assertEqual(nsnk.log_level(), "alert")
self.assertEqual(b.one_in_n.log_level(), "alert")
if __name__ == '__main__':
gr_unittest.run(test_logger, "test_logger.xml")
| gpl-3.0 | -5,142,059,494,874,529,000 | 5,772,478,959,962,942,000 | 36.941176 | 96 | 0.618605 | false |
mfherbst/spack | var/spack/repos/builtin.mock/packages/othervirtual/package.py | 5 | 1509 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Othervirtual(Package):
homepage = "http://somewhere.com"
url = "http://somewhere.com/stuff-1.0.tar.gz"
version('1.0', '67890abcdef1234567890abcdef12345')
provides('stuff')
def install(self, spec, prefix):
pass
| lgpl-2.1 | -4,594,939,439,995,252,000 | -3,085,082,261,697,463,000 | 39.783784 | 78 | 0.666667 | false |
dr4g0nsr/phoenix-kodi-addon | plugin.video.phstreams/resources/lib/sources/muchmoviesv2_mv_tv.py | 7 | 7118 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import cloudflare
from resources.lib.libraries import client
class source:
def __init__(self):
self.base_link = 'http://123movies.to'
self.search_link = '/movie/search/%s'
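    # The methods below follow the usual scraper interface for this add-on:
    # get_movie/get_show/get_episode resolve a site-relative URL for a title,
    # get_sources lists playable links for it, and resolve() turns one link
    # into a final, playable URL.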
def get_movie(self, imdb, title, year):
try:
query = self.search_link % urllib.quote(title)
query = urlparse.urljoin(self.base_link, query)
result = cloudflare.source(query)
title = cleantitle.movie(title)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
result = [i for i in result if any(x in i[2] for x in years)]
result = [(i[0], re.sub('\d{4}$', '', i[1]).strip()) for i in result]
result = [i[0] for i in result if title == cleantitle.movie(i[1])][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
url = '%s (%s)' % (tvshowtitle, year)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
tvshowtitle, year = re.compile('(.+?) [(](\d{4})[)]$').findall(url)[0]
query = self.search_link % urllib.quote(tvshowtitle)
query = urlparse.urljoin(self.base_link, query)
result = cloudflare.source(query)
tvshowtitle = cleantitle.tv(tvshowtitle)
season = '%01d' % int(season)
episode = '%01d' % int(episode)
years = ['%s' % str(year), '%s' % str(int(year)+1), '%s' % str(int(year)-1)]
result = client.parseDOM(result, 'div', attrs = {'class': 'ml-item'})
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'h2'), re.compile('class *= *[\'|\"]jt-info[\'|\"]>(\d{4})<').findall(i)) for i in result]
result = [(i[0][0], i[1][0], i[2][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0 and len(i[2]) > 0]
result = [(i[0], re.compile('(.+?) - Season (\d*)$').findall(i[1]), i[2]) for i in result]
result = [(i[0], i[1][0][0], i[1][0][1], i[2]) for i in result if len(i[1]) > 0]
result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
result = [i for i in result if season == i[2]]
result = [(i[0], i[1], str(int(i[3]) - int(i[2]) + 1)) for i in result]
result = [i[0] for i in result if any(x in i[2] for x in years)][0]
result += '?S%02dE%02d' % (int(season), int(episode))
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
content = re.compile('(.+?)\?S\d*E\d*$').findall(url)
try: url, season, episode = re.compile('(.+?)\?S(\d*)E(\d*)$').findall(url)[0]
except: pass
url = urlparse.urljoin(self.base_link, url)
url = urlparse.urljoin(url, 'watching.html')
referer = url
result = cloudflare.source(url)
try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0]
except: quality = 'HD'
if '1080p' in quality: quality = '1080p'
else: quality = 'HD'
url = re.compile('var\s+url_playlist *= *"(.+?)"').findall(result)[0]
result = client.parseDOM(result, 'div', attrs = {'class': 'les-content'})
result = zip(client.parseDOM(result, 'a', ret='onclick'), client.parseDOM(result, 'a'))
result = [(i[0], re.compile('(\d+)').findall(i[1])) for i in result]
result = [(i[0], '%01d' % int(i[1][0])) for i in result if len(i[1]) > 0]
result = [(i[0], i[1]) for i in result]
result = [(re.compile('(\d+)').findall(i[0]), i[1]) for i in result]
result = [('%s/%s/%s' % (url, i[0][0], i[0][1]), i[1]) for i in result]
if len(content) == 0:
url = [i[0] for i in result]
else:
episode = '%01d' % int(episode)
url = [i[0] for i in result if episode == i[1]]
url = ['%s|User-Agent=%s&Referer=%s' % (i, urllib.quote_plus(client.agent()), urllib.quote_plus(referer)) for i in url]
for u in url: sources.append({'source': 'Muchmovies', 'quality': quality, 'provider': 'Muchmoviesv2', 'url': u})
return sources
except:
return sources
def resolve(self, url):
try:
url, headers = url.split('|')
idx = int(re.compile('/(\d+)').findall(url)[-1])
result = cloudflare.request(url)
url = client.parseDOM(result, 'item')
url = [i for i in url if not 'youtube.com' in i and not '>Intro<' in i][idx]
url = re.compile("file *= *[\'|\"](.+?)[\'|\"]").findall(url)
url = [i for i in url if not i.endswith('.srt')][0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
if 'google' in url:
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
else:
url = '%s|%s' % (url, headers)
return url
except:
return
| gpl-2.0 | -8,552,426,145,992,106,000 | -7,230,975,024,751,110,000 | 37.684783 | 169 | 0.52304 | false |
raybuhr/grab | test/spider_stat.py | 12 | 1676 | import six
from grab.spider import Spider, Task
from tempfile import mkstemp
import os
from six import StringIO
import mock
import sys
from test.util import TMP_DIR
from test.util import BaseGrabTestCase, build_spider
class BasicSpiderTestCase(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_stop_timer_invalid_input(self):
class TestSpider(Spider):
pass
bot = build_spider(TestSpider)
self.assertRaises(KeyError, bot.timer.stop, 'zzz')
def test_counters_and_collections(self):
class TestSpider(Spider):
def prepare(self):
self.stat.logging_period = 0
self.stat.inc('foo')
def task_page_valid(self, grab, task):
self.stat.inc('foo')
def task_page_fail(self, grab, task):
1/0
bot = build_spider(TestSpider)
bot.setup_queue()
bot.add_task(Task('page_valid', url=self.server.get_url()))
bot.add_task(Task('page_fail', url=self.server.get_url()))
bot.run()
self.assertEqual(2, bot.stat.counters['foo'])
self.assertEqual(1, len(bot.stat.collections['fatal']))
def test_render_stats(self):
class TestSpider(Spider):
def prepare(self):
self.stat.logging_period = 0
self.stat.inc('foo')
def task_page(self, grab, task):
pass
bot = build_spider(TestSpider)
bot.setup_queue()
bot.add_task(Task('page', url=self.server.get_url()))
bot.run()
stats = bot.render_stats()
stats = bot.render_stats(timing=True)
| mit | 174,898,619,318,509,900 | 6,175,297,369,919,454,000 | 27.896552 | 67 | 0.592482 | false |
newswangerd/ansible | test/units/module_utils/test_distro.py | 35 | 1512 |
# (c) 2018 Adrian Likins <[email protected]>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# or
# Apache License v2.0 (see http://www.apache.org/licenses/LICENSE-2.0)
#
# Dual licensed so any test cases could potentially be included by the upstream project
# that module_utils/distro.py is from (https://github.com/nir0s/distro)
# Note that nir0s/distro has many more tests in it's test suite. The tests here are
# primarily for testing the vendoring.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils import distro
from ansible.module_utils.six import string_types
# Generic test case with minimal assertions about specific returned values.
class TestDistro():
# should run on any platform without errors, even if non-linux without any
# useful info to return
def test_info(self):
info = distro.info()
assert isinstance(info, dict), \
            'distro.info() returned %s (%s) which is not a dict' % (info, type(info))
def test_linux_distribution(self):
linux_dist = distro.linux_distribution()
assert isinstance(linux_dist, tuple), \
            'linux_distribution() returned %s (%s) which is not a tuple' % (linux_dist, type(linux_dist))
def test_id(self):
id = distro.id()
assert isinstance(id, string_types), 'distro.id() returned %s (%s) which is not a string' % (id, type(id))
| gpl-3.0 | -6,712,252,364,421,977,000 | 8,495,894,992,464,263,000 | 38.789474 | 114 | 0.69246 | false |
nozuono/calibre-webserver | src/calibre/ebooks/rtf2xml/group_styles.py | 24 | 8744 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys, os, re
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
class GroupStyles:
"""
Form lists.
Use RTF's own formatting to determine if a paragraph definition is part of a
list.
Use indents to determine items and how lists are nested.
"""
def __init__(self,
in_file,
bug_handler,
copy = None,
run_level = 1,
wrap = 0,
):
"""
Required:
            'in_file'
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level' -- level of error reporting
            'wrap' -- whether to wrap grouped paragraphs in style-group tags
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
self.__wrap = wrap
def __initiate_values(self):
"""
Required:
Nothing
Return:
Nothing
Logic:
            self.__end_list holds the token types that force an open style
            group to close.
"""
self.__state = "default"
self.__left_indent = 0
self.__list_type = 'not-defined'
self.__pard_def = ""
self.__all_lists = []
self.__list_chunk = ''
self.__state_dict={
'default' : self.__default_func,
'in_pard' : self.__in_pard_func,
'after_pard' : self.__after_pard_func,
}
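        # State machine summary: 'default' means outside any paragraph
        # definition, 'in_pard' means inside an open paragraph-definition tag,
        # and 'after_pard' means one definition has closed and the next token
        # decides whether the style group continues or is closed.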
# section end
self.__end_list = [
# section end
'mi<mk<sect-close',
'mi<mk<sect-start',
# table begin
'mi<mk<tabl-start',
# field block begin
'mi<mk<fldbk-end_',
'mi<mk<fldbkstart',
# cell end
'mi<mk<close_cell',
# item end
'mi<tg<item_end__',
# footnote end
'mi<mk<foot___clo',
'mi<mk<footnt-ope',
# heading end
'mi<mk<header-beg',
'mi<mk<header-end',
'mi<mk<head___clo',
# lists
'mi<tg<item_end__',
'mi<tg<item_end__',
'mi<mk<list_start'
# body close
# don't use
# 'mi<mk<body-close',
# 'mi<mk<par-in-fld',
# 'cw<tb<cell______',
# 'cw<tb<row-def___',
# 'cw<tb<row_______',
# 'mi<mk<sec-fd-beg',
]
self.__name_regex = re.compile(r'<name>')
self.__found_appt = 0
self.__line_num = 0
def __in_pard_func(self, line):
"""
Required:
line -- the line of current text.
Return:
Nothing
Logic:
You are in a list, but in the middle of a paragraph definition.
Don't do anything until you find the end of the paragraph definition.
"""
if self.__token_info == 'mi<tg<close_____' \
and line[17:-1] == 'paragraph-definition':
self.__state = 'after_pard'
else:
self.__write_obj.write(line)
def __after_pard_func(self, line):
"""
Required:
line -- the line of current text.
Return:
Nothing
Logic:
"""
if self.__token_info == 'mi<tg<open-att__' \
and line[17:37] == 'paragraph-definition':
# found paragraph definition
self.__pard_after_par_def_func(line)
elif self.__token_info == 'mi<tg<close_____' \
and line[17:-1] == 'paragraph-definition':
sys.stderr.write('Wrong flag in __after_pard_func\n')
if self.__run_level > 2:
msg = 'wrong flag'
raise self.__bug_handler, msg
elif self.__token_info in self.__end_list:
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_end_wrap()
self.__write_obj.write(self.__list_chunk)
self.__list_chunk = ''
self.__state = 'default'
self.__write_obj.write(line)
else:
self.__list_chunk += line
def __close_pard_(self, line):
self.__write_obj.write(self.__list_chunk)
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_end_wrap()
self.__list_chunk = ''
self.__state = 'default'
def __write_start_wrap(self, name):
if self.__wrap:
self.__write_obj.write('mi<mk<style-grp_<%s\n' % name)
self.__write_obj.write('mi<tg<open-att__<style-group<name>%s\n' % name)
self.__write_obj.write('mi<mk<style_grp_<%s\n' % name)
def __write_end_wrap(self):
if self.__wrap:
self.__write_obj.write('mi<mk<style_gend\n' )
self.__write_obj.write('mi<tg<close_____<style-group\n')
self.__write_obj.write('mi<mk<stylegend_\n' )
def __pard_after_par_def_func(self, line):
"""
Required:
line -- the line of current text.
Return:
Nothing
Logic:
"""
if self.__last_style_name == self.__style_name:
# just keep going
if self.__wrap:
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_obj.write(self.__list_chunk)
self.__list_chunk = ''
self.__state = 'in_pard'
if self.__wrap:
self.__write_obj.write(line)
else:
# different name for the paragraph definition
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_end_wrap()
self.__write_obj.write(self.__list_chunk)
self.__write_start_wrap(self.__style_name)
self.__write_obj.write(line)
self.__state = 'in_pard'
self.__last_style_name = self.__style_name
self.__list_chunk = ''
def __default_func(self, line):
"""
Required:
self, line
Returns:
Nothing
Logic
            Look for the start of a paragraph definition. When one is found,
            record its style name, open a style group, and change the state
            to in_pard.
"""
if self.__token_info == 'mi<tg<open-att__' \
and line[17:37] == 'paragraph-definition':
self.__state = 'in_pard'
self.__last_style_name = self.__style_name
self.__write_start_wrap(self.__last_style_name)
self.__write_obj.write(line)
else:
self.__write_obj.write(line)
def __get_style_name(self, line):
if self.__token_info == 'mi<mk<style-name':
self.__style_name = line[17:-1]
def group_styles(self):
"""
Required:
nothing
Returns:
original file will be changed
Logic:
"""
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
self.__get_style_name(line)
action = self.__state_dict.get(self.__state)
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "group_styles.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
| gpl-3.0 | -3,819,592,692,799,159,300 | 3,625,354,925,115,259,400 | 35.739496 | 88 | 0.470723 | false |
gaddman/ansible | lib/ansible/modules/cloud/cloudstack/cs_securitygroup.py | 78 | 5313 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_securitygroup
short_description: Manages security groups on Apache CloudStack based clouds.
description:
- Create and remove security groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
state:
description:
- State of the security group.
default: present
choices: [ present, absent ]
domain:
description:
- Domain the security group is related to.
account:
description:
- Account the security group is related to.
project:
description:
- Name of the project the security group to be created in.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: create a security group
local_action:
module: cs_securitygroup
name: default
description: default security group
- name: remove a security group
local_action:
module: cs_securitygroup
name: default
state: absent
'''
RETURN = '''
---
id:
description: UUID of the security group.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of security group.
returned: success
type: string
sample: app
description:
description: Description of security group.
returned: success
type: string
sample: application security group
tags:
description: List of resource tags associated with the security group.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
project:
description: Name of project the security group is related to.
returned: success
type: string
sample: Production
domain:
description: Domain the security group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the security group is related to.
returned: success
type: string
sample: example account
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackSecurityGroup, self).__init__(module)
self.security_group = None
def get_security_group(self):
if not self.security_group:
args = {
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'securitygroupname': self.module.params.get('name'),
}
sgs = self.query_api('listSecurityGroups', **args)
if sgs:
self.security_group = sgs['securitygroup'][0]
return self.security_group
def create_security_group(self):
security_group = self.get_security_group()
if not security_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'description': self.module.params.get('description'),
}
if not self.module.check_mode:
res = self.query_api('createSecurityGroup', **args)
security_group = res['securitygroup']
return security_group
def remove_security_group(self):
security_group = self.get_security_group()
if security_group:
self.result['changed'] = True
args = {
'name': self.module.params.get('name'),
'projectid': self.get_project(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
}
if not self.module.check_mode:
self.query_api('deleteSecurityGroup', **args)
return security_group
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(),
state=dict(choices=['present', 'absent'], default='present'),
project=dict(),
account=dict(),
domain=dict(),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_sg = AnsibleCloudStackSecurityGroup(module)
state = module.params.get('state')
if state in ['absent']:
sg = acs_sg.remove_security_group()
else:
sg = acs_sg.create_security_group()
result = acs_sg.get_result(sg)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,821,417,086,873,225,200 | -2,757,855,429,636,860,400 | 26.518135 | 101 | 0.624176 | false |
ThiagoGarciaAlves/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/patcomp.py | 304 | 7091 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Pattern compiler.
The grammar is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = "Guido van Rossum <[email protected]>"
# Python imports
import os
import StringIO
# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar
# Really local imports
from . import pytree
from . import pygram
# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class PatternSyntaxError(Exception):
pass
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple
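# Illustrative usage (assumed, not part of the original module):
#     pc = PatternCompiler()
#     pat = pc.compile_pattern("power< 'print' trailer< '(' args=any ')' > >")
#     results = {}
#     if pat.match(node, results):
#         ...  # the subtree bound to 'args' is then available in results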
class PatternCompiler(object):
def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
self.grammar = driver.load_grammar(grammar_file)
self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
def compile_pattern(self, input, debug=False, with_tree=False):
"""Compiles a pattern string to a nested pytree.*Pattern object."""
tokens = tokenize_wrapper(input)
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
raise PatternSyntaxError(str(e))
if with_tree:
return self.compile_node(root), root
else:
return self.compile_node(root)
def compile_node(self, node):
"""Compiles a node, recursively.
This is one big switch on the node type.
"""
# XXX Optimize certain Wildcard-containing-Wildcard patterns
# that can be merged
if node.type == self.syms.Matcher:
node = node.children[0] # Avoid unneeded recursion
if node.type == self.syms.Alternatives:
# Skip the odd children since they are just '|' tokens
alts = [self.compile_node(ch) for ch in node.children[::2]]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
return p.optimize()
if node.type == self.syms.Alternative:
units = [self.compile_node(ch) for ch in node.children]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
assert node.type == self.syms.Unit
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
assert repeat.type == self.syms.Repeater
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
assert children[-1].type == token.RBRACE
assert len(children) in (3, 5)
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
else:
assert False
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize()
def compile_basic(self, nodes, repeat=None):
# Compile STRING | NAME [Details] | (...) | [...]
assert len(nodes) >= 1
node = nodes[0]
if node.type == token.STRING:
value = unicode(literals.evalString(node.value))
return pytree.LeafPattern(_type_of_literal(value), value)
elif node.type == token.NAME:
value = node.value
if value.isupper():
if value not in TOKEN_MAP:
raise PatternSyntaxError("Invalid token: %r" % value)
if nodes[1:]:
raise PatternSyntaxError("Can't have details for token")
return pytree.LeafPattern(TOKEN_MAP[value])
else:
if value == "any":
type = None
elif not value.startswith("_"):
type = getattr(self.pysyms, value, None)
if type is None:
raise PatternSyntaxError("Invalid symbol: %r" % value)
if nodes[1:]: # Details present
content = [self.compile_node(nodes[1].children[1])]
else:
content = None
return pytree.NodePattern(type, content)
elif node.value == "(":
return self.compile_node(nodes[1])
elif node.value == "[":
assert repeat is None
subpattern = self.compile_node(nodes[1])
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
assert False, node
def get_int(self, node):
assert node.type == token.NUMBER
return int(node.value)
# Map named tokens to the type value for a LeafPattern
TOKEN_MAP = {"NAME": token.NAME,
"STRING": token.STRING,
"NUMBER": token.NUMBER,
"TOKEN": None}
def _type_of_literal(value):
if value[0].isalpha():
return token.NAME
elif value in grammar.opmap:
return grammar.opmap[value]
else:
return None
def pattern_convert(grammar, raw_node_info):
"""Converts raw node information to a Node or Leaf instance."""
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context)
def compile_pattern(pattern):
return PatternCompiler().compile_pattern(pattern)
| apache-2.0 | -3,728,615,406,081,622,000 | -5,963,105,322,939,093,000 | 33.590244 | 79 | 0.573121 | false |
cz-maria/Pawel_the_Mage | wrogowie.py | 1 | 3434 | import pygame
import stale
class Enemy(pygame.sprite.Sprite):
walking_frames_l = []
walking_frames_r = []
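    # Shared, class-level lists of walking animation frames (left/right
    # facing); note that __init__ appends to them for every Enemy created.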
def __init__(self):
super().__init__()
image = pygame.image.load("knight.png").convert()
image.set_colorkey(stale.WHITE)
self.walking_frames_l.append(image)
image = pygame.transform.flip(image, True, False)
self.walking_frames_r.append(image)
image = pygame.image.load("knight2.png").convert()
image.set_colorkey(stale.WHITE)
self.walking_frames_l.append(image)
image = pygame.transform.flip(image, True, False)
self.walking_frames_r.append(image)
        self.image = self.walking_frames_l[0]
self.rect = self.image.get_rect()
        # Knight's speed
self.change_x = -1
self.change_y = 0
# List of sprites we can bump against
self.level = None
def update(self):
""" Ruch rycerza """
# Gravity
self.calc_grav()
# Move left/right
self.rect.x += self.change_x
pos = self.rect.x
if self.change_x < 0:
frame = (pos // 20) % len(self.walking_frames_l)
self.image = self.walking_frames_l[frame]
else:
frame = (pos // 20) % len(self.walking_frames_r)
self.image = self.walking_frames_r[frame]
# See if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# If we are moving right,
# set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
self.change_x = - self.change_x
elif self.change_x < 0:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
self.change_x = -self.change_x
self.direction = "R"
if self.rect.x > stale.SCREEN_WIDTH - self.rect.width or self.rect.x < 0:
self.change_x = - self.change_x
# Move up/down
self.rect.y += self.change_y
# Check and see if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
if self.change_x > 0 and self.rect.x > block.rect.right - 85:
self.change_x = -1
elif self.change_x < 0 and self.rect.x < block.rect.left:
self.change_x = 1
elif self.change_y < 0:
self.rect.top = block.rect.bottom
# Stop our vertical movement
self.change_y = 0
def calc_grav(self):
""" Calculate effect of gravity. """
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
# See if we are on the ground.
if self.rect.y >= stale.SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = stale.SCREEN_HEIGHT - self.rect.height
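# Illustrative usage sketch (assumes a level object exposing `platform_list`, as
# update() above requires, and a pygame sprite group driving the game loop):
#
#     enemy = Enemy()
#     enemy.level = current_level
#     enemy.rect.x, enemy.rect.y = 300, 0
#     active_sprites.add(enemy)
#     # ...then call active_sprites.update() once per frame.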
class Boss(Enemy):
def __init__(self, player):
super().__init__()
| mit | 4,080,467,603,515,169,000 | -2,903,491,754,515,794,000 | 32.666667 | 91 | 0.547175 | false |
holygits/incubator-airflow | tests/contrib/hooks/test_zendesk_hook.py | 37 | 3566 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import mock
from airflow.hooks.zendesk_hook import ZendeskHook
from zdesk import RateLimitError
class TestZendeskHook(unittest.TestCase):
@mock.patch("airflow.hooks.zendesk_hook.time")
def test_sleeps_for_correct_interval(self, mocked_time):
sleep_time = 10
# To break out of the otherwise infinite tries
mocked_time.sleep = mock.Mock(side_effect=ValueError, return_value=3)
conn_mock = mock.Mock()
mock_response = mock.Mock()
mock_response.headers.get.return_value = sleep_time
conn_mock.call = mock.Mock(
side_effect=RateLimitError(msg="some message", code="some code",
response=mock_response))
zendesk_hook = ZendeskHook("conn_id")
zendesk_hook.get_conn = mock.Mock(return_value=conn_mock)
with self.assertRaises(ValueError):
zendesk_hook.call("some_path", get_all_pages=False)
mocked_time.sleep.assert_called_with(sleep_time)
@mock.patch("airflow.hooks.zendesk_hook.Zendesk")
def test_returns_single_page_if_get_all_pages_false(self, _):
zendesk_hook = ZendeskHook("conn_id")
mock_connection = mock.Mock()
mock_connection.host = "some_host"
zendesk_hook.get_connection = mock.Mock(return_value=mock_connection)
zendesk_hook.get_conn()
mock_conn = mock.Mock()
mock_call = mock.Mock(
return_value={'next_page': 'https://some_host/something', 'path':
[]})
mock_conn.call = mock_call
zendesk_hook.get_conn = mock.Mock(return_value=mock_conn)
zendesk_hook.call("path", get_all_pages=False)
mock_call.assert_called_once_with("path", None)
@mock.patch("airflow.hooks.zendesk_hook.Zendesk")
def test_returns_multiple_pages_if_get_all_pages_true(self, _):
zendesk_hook = ZendeskHook("conn_id")
mock_connection = mock.Mock()
mock_connection.host = "some_host"
zendesk_hook.get_connection = mock.Mock(return_value=mock_connection)
zendesk_hook.get_conn()
mock_conn = mock.Mock()
mock_call = mock.Mock(
return_value={'next_page': 'https://some_host/something', 'path': []})
mock_conn.call = mock_call
zendesk_hook.get_conn = mock.Mock(return_value=mock_conn)
zendesk_hook.call("path", get_all_pages=True)
assert mock_call.call_count == 2
@mock.patch("airflow.hooks.zendesk_hook.Zendesk")
def test_zdesk_is_inited_correctly(self, mock_zendesk):
conn_mock = mock.Mock()
conn_mock.host = "conn_host"
conn_mock.login = "conn_login"
conn_mock.password = "conn_pass"
zendesk_hook = ZendeskHook("conn_id")
zendesk_hook.get_connection = mock.Mock(return_value=conn_mock)
zendesk_hook.get_conn()
mock_zendesk.assert_called_with('https://conn_host', 'conn_login',
'conn_pass', True)
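# Note on the behaviour exercised above: the hook is expected to catch zdesk's
# RateLimitError, sleep for the retry interval taken from the response headers,
# and then retry the call; test_sleeps_for_correct_interval asserts exactly that
# sleep via the mocked time module.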
| apache-2.0 | 6,901,377,845,651,897,000 | 8,838,073,835,194,865,000 | 39.067416 | 82 | 0.645822 | false |
AmeristralianDollar/AmeristralianDollar | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
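# Illustrative example of the "po" format handled above: the two-line input
#     msgid "Wallet"
#     msgstr ""
# yields [(['"Wallet"'], ['""'])] -- raw quoted-string lines, not unescaped text.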
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit | 9,031,094,754,121,210,000 | 492,509,403,505,293,100 | 24.611111 | 80 | 0.571584 | false |
pde/tosback2 | web-frontend/beautifulsoup4/bs4/builder/_htmlparser.py | 9 | 7507 | """Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import HTMLParser
import sys
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
self.soup.handle_starttag(name, None, None, dict(attrs))
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
return markup, None, None, False
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
return (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
parser.feed(markup)
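# Illustrative usage sketch: this builder is normally selected through its
# feature string rather than instantiated directly, e.g.
#
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup("<p>Hello</p>", "html.parser")
#
# which resolves to HTMLParserTreeBuilder via the HTMLPARSER feature above.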
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
| gpl-2.0 | 2,139,716,139,520,796,000 | 7,402,223,554,341,734,000 | 31.925439 | 77 | 0.564673 | false |
I-prefer-the-front-end/I-prefer-the-front-end | iptfe/blog/views.py | 1 | 1283 | from django.shortcuts import render
from django.http import HttpResponse
from blog.models import Post, Comment
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
def archive(request):
    all_posts = Post.objects.all()
context = {}
context['posts'] = all_posts
context['count'] = all_posts.count()
context['page'] = 'archive'
return render(request, 'blog/archive.html', context)
def post(request, slug):
if slug:
        post = Post.objects.filter(slug=slug).first()
    else:
        post = Post.objects.latest()
context = {}
context['title'] = post.title
context['post_date'] = post.post_date
context['content'] = post.content
context['comments'] = post.comment_set.all()
return render(request, 'blog/post.html', context)
def comment(request):
comment = request.POST.get("comment", "")
author = request.POST.get("author", "")
email = request.POST.get("email", "")
post_title = request.POST.get("post", "")
    post = Post.objects.filter(title=post_title).first()
user_comment = Comment(author=author, comment=comment, email=email, post=post)
user_comment.save()
return HttpResponseRedirect(reverse('blog:post', args=(post.slug,)))
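# Illustrative URLconf sketch (the URL patterns below are assumptions, chosen
# only to match the reverse('blog:post', ...) call above):
#
#     urlpatterns = [
#         url(r'^archive/$', views.archive, name='archive'),
#         url(r'^post/(?P<slug>[-\w]+)/$', views.post, name='post'),
#         url(r'^comment/$', views.comment, name='comment'),
#     ]
#
# The comment view expects POST fields "comment", "author", "email" and "post"
# (the post title) and redirects back to blog:post.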
| mit | 6,133,237,243,800,579,000 | -1,610,754,282,111,818,800 | 28.837209 | 82 | 0.670304 | false |
jeromeetienne/neoip | libxml2-2.6.27/python/tests/ctxterror.py | 87 | 1318 | #!/usr/bin/python -u
#
# This test exercise the redirection of error messages with a
# functions defined in Python.
#
import sys
import libxml2
# Memory debug specific
libxml2.debugMemory(1)
expect="""--> (3) xmlns: URI foo is not absolute
--> (4) Opening and ending tag mismatch: x line 0 and y
"""
err=""
def callback(arg,msg,severity,reserved):
global err
err = err + "%s (%d) %s" % (arg,severity,msg)
s = """<x xmlns="foo"></y>"""
parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
parserCtxt.setErrorHandler(callback, "-->")
if parserCtxt.getErrorHandler() != (callback,"-->"):
print "getErrorHandler failed"
sys.exit(1)
parserCtxt.parseChunk(s,len(s),1)
doc = parserCtxt.doc()
doc.freeDoc()
parserCtxt = None
if err != expect:
print "error"
print "received %s" %(err)
print "expected %s" %(expect)
sys.exit(1)
i = 10000
while i > 0:
parserCtxt = libxml2.createPushParser(None,"",0,"test.xml")
parserCtxt.setErrorHandler(callback, "-->")
parserCtxt.parseChunk(s,len(s),1)
doc = parserCtxt.doc()
doc.freeDoc()
parserCtxt = None
err = ""
i = i - 1
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
| gpl-3.0 | 4,406,658,428,427,257,300 | -8,603,433,467,094,520,000 | 22.535714 | 63 | 0.657056 | false |
l0b0/tilde | .config/calibre/global.py | 1 | 3641 | # calibre wide preferences
### Begin group: DEFAULT
# database path
# Path to the database in which books are stored
database_path = '/home/username/library1.db'
# filename pattern
# Pattern to guess metadata from filenames
filename_pattern = u'(?P<title>.+) - (?P<author>[^_]+)'
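# For illustration: a filename stem such as "My Book - Jane Doe" would be
# guessed as title "My Book" and author "Jane Doe" by the pattern above.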
# isbndb com key
# Access key for isbndb.com
isbndb_com_key = ''
# network timeout
# Default timeout for network operations (seconds)
network_timeout = 5
# library path
# Path to directory in which your library of books is stored
library_path = u'.calibre'
# language
# The language in which to display the user interface
language = 'en'
# output format
# The default output format for ebook conversions.
output_format = 'epub'
# input format order
# Ordered list of formats to prefer for input.
input_format_order = cPickle.loads('\x80\x02]q\x01(U\x04EPUBq\x02U\x04AZW3q\x03U\x04MOBIq\x04U\x03LITq\x05U\x03PRCq\x06U\x03FB2q\x07U\x04HTMLq\x08U\x03HTMq\tU\x04XHTMq\nU\x05SHTMLq\x0bU\x05XHTMLq\x0cU\x03ZIPq\rU\x03ODTq\x0eU\x03RTFq\x0fU\x03PDFq\x10U\x03TXTq\x11e.')
# read file metadata
# Read metadata from files
read_file_metadata = True
# worker process priority
# The priority of worker processes. A higher priority means they run faster and consume more resources. Most tasks like conversion/news download/adding books/etc. are affected by this setting.
worker_process_priority = 'normal'
# swap author names
# Swap author first and last names when reading metadata
swap_author_names = False
# add formats to existing
# Add new formats to existing book records
add_formats_to_existing = False
# check for dupes on ctl
# Check for duplicates when copying to another library
check_for_dupes_on_ctl = False
# installation uuid
# Installation UUID
installation_uuid = '33ac5ee3-f090-4bf9-a6e0-322a803d5c7f'
# new book tags
# Tags to apply to books added to the library
new_book_tags = cPickle.loads('\x80\x02]q\x01.')
# mark new books
# Mark newly added books. The mark is a temporary mark that is automatically removed when calibre is restarted.
mark_new_books = False
# saved searches
# List of named saved searches
saved_searches = cPickle.loads('\x80\x02}q\x01.')
# user categories
# User-created tag browser categories
user_categories = cPickle.loads('\x80\x02}q\x01.')
# manage device metadata
# How and when calibre updates metadata on the device.
manage_device_metadata = 'manual'
# limit search columns
# When searching for text without using lookup prefixes, as for example, Red instead of title:Red, limit the columns searched to those named below.
limit_search_columns = False
# limit search columns to
# Choose columns to be searched when not using prefixes, as for example, when searching for Red instead of title:Red. Enter a list of search/lookup names separated by commas. Only takes effect if you set the option to limit search columns above.
limit_search_columns_to = cPickle.loads('\x80\x02]q\x01(U\x05titleq\x02U\x07authorsq\x03U\x04tagsq\x04U\x06seriesq\x05U\tpublisherq\x06e.')
# use primary find in search
# Characters typed in the search box will match their accented versions, based on the language you have chosen for the calibre interface. For example, in English, searching for n will match both ñ and n, but if your language is Spanish it will only match n. Note that this is much slower than a simple search on very large libraries. Also, this option will have no effect if you turn on case-sensitive searching
use_primary_find_in_search = True
# case sensitive
# Make searches case-sensitive
case_sensitive = False
# migrated
# For Internal use. Don't modify.
migrated = False
| gpl-3.0 | -1,377,347,539,044,766,000 | 3,794,808,671,359,909,400 | 34.686275 | 411 | 0.765659 | false |
philippze/django-cms | menus/templatetags/menu_tags.py | 35 | 14631 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from classytags.arguments import IntegerArgument, Argument, StringArgument
from classytags.core import Options
from classytags.helpers import InclusionTag
from cms.utils.i18n import force_language, get_language_objects
from django import template
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import get_language, ugettext
from menus.menu_pool import menu_pool
from menus.utils import DefaultLanguageChanger
register = template.Library()
class NOT_PROVIDED:
pass
def cut_after(node, levels, removed):
"""
    given a tree of nodes, cuts after N levels
"""
if levels == 0:
removed.extend(node.children)
node.children = []
else:
removed_local = []
for child in node.children:
if child.visible:
cut_after(child, levels - 1, removed)
else:
removed_local.append(child)
for removed_child in removed_local:
node.children.remove(removed_child)
removed.extend(removed_local)
def remove(node, removed):
removed.append(node)
if node.parent:
if node in node.parent.children:
node.parent.children.remove(node)
def cut_levels(nodes, from_level, to_level, extra_inactive, extra_active):
"""
cutting nodes away from menus
"""
final = []
removed = []
selected = None
for node in nodes:
if not hasattr(node, 'level'):
# remove and ignore nodes that don't have level information
remove(node, removed)
continue
if node.level == from_level:
# turn nodes that are on from_level into root nodes
final.append(node)
node.parent = None
if not node.ancestor and not node.selected and not node.descendant:
# cut inactive nodes to extra_inactive, but not of descendants of
# the selected node
cut_after(node, extra_inactive, removed)
if node.level > to_level and node.parent:
# remove nodes that are too deep, but not nodes that are on
# from_level (local root nodes)
remove(node, removed)
if node.selected:
selected = node
if not node.visible:
remove(node, removed)
if selected:
cut_after(selected, extra_active, removed)
if removed:
for node in removed:
if node in final:
final.remove(node)
return final
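# Illustrative reading of the parameters, matching the ShowMenu defaults below:
# cut_levels(nodes, 0, 100, 0, 1000) keeps visible nodes from level 0 down to
# level 100, prunes inactive branches immediately (extra_inactive=0) and keeps
# up to 1000 levels of children under the selected node (extra_active=1000).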
def flatten(nodes):
flat = []
for node in nodes:
flat.append(node)
flat.extend(flatten(node.children))
return flat
class ShowMenu(InclusionTag):
"""
render a nested list of all children of the pages
- from_level: starting level
- to_level: max level
- extra_inactive: how many levels should be rendered of the not active tree?
- extra_active: how deep should the children of the active node be rendered?
- namespace: the namespace of the menu. if empty will use all namespaces
- root_id: the id of the root node
- template: template used to render the menu
"""
name = 'show_menu'
template = 'menu/dummy.html'
options = Options(
IntegerArgument('from_level', default=0, required=False),
IntegerArgument('to_level', default=100, required=False),
IntegerArgument('extra_inactive', default=0, required=False),
IntegerArgument('extra_active', default=1000, required=False),
StringArgument('template', default='menu/menu.html', required=False),
StringArgument('namespace', default=None, required=False),
StringArgument('root_id', default=None, required=False),
Argument('next_page', default=None, required=False),
)
def get_context(self, context, from_level, to_level, extra_inactive,
extra_active, template, namespace, root_id, next_page):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'menu/empty.html'}
if next_page:
children = next_page.children
else:
# new menu... get all the data so we can save a lot of queries
nodes = menu_pool.get_nodes(request, namespace, root_id)
if root_id: # find the root id and cut the nodes
id_nodes = menu_pool.get_nodes_by_attribute(nodes, "reverse_id", root_id)
if id_nodes:
node = id_nodes[0]
nodes = node.children
for remove_parent in nodes:
remove_parent.parent = None
from_level += node.level + 1
to_level += node.level + 1
nodes = flatten(nodes)
else:
nodes = []
children = cut_levels(nodes, from_level, to_level, extra_inactive, extra_active)
children = menu_pool.apply_modifiers(children, request, namespace, root_id, post_cut=True)
try:
context['children'] = children
context['template'] = template
context['from_level'] = from_level
context['to_level'] = to_level
context['extra_inactive'] = extra_inactive
context['extra_active'] = extra_active
context['namespace'] = namespace
except:
context = {"template": template}
return context
register.tag(ShowMenu)
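# Illustrative template usage (standard django CMS syntax for this tag):
#
#     {% load menu_tags %}
#     {% show_menu 0 100 100 100 "menu/menu.html" %}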
class ShowMenuBelowId(ShowMenu):
name = 'show_menu_below_id'
options = Options(
Argument('root_id', default=None, required=False),
IntegerArgument('from_level', default=0, required=False),
IntegerArgument('to_level', default=100, required=False),
IntegerArgument('extra_inactive', default=0, required=False),
IntegerArgument('extra_active', default=1000, required=False),
Argument('template', default='menu/menu.html', required=False),
Argument('namespace', default=None, required=False),
Argument('next_page', default=None, required=False),
)
register.tag(ShowMenuBelowId)
class ShowSubMenu(InclusionTag):
"""
show the sub menu of the current nav-node.
- levels: how many levels deep
- root_level: the level to start the menu at
- nephews: the level of descendants of siblings (nephews) to show
- template: template used to render the navigation
"""
name = 'show_sub_menu'
template = 'menu/dummy.html'
options = Options(
IntegerArgument('levels', default=100, required=False),
Argument('root_level', default=None, required=False),
IntegerArgument('nephews', default=100, required=False),
Argument('template', default='menu/sub_menu.html', required=False),
)
def get_context(self, context, levels, root_level, nephews, template):
# Django 1.4 doesn't accept 'None' as a tag value and resolve to ''
# So we need to force it to None again
if not root_level and root_level != 0:
root_level = None
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'menu/empty.html'}
nodes = menu_pool.get_nodes(request)
children = []
# adjust root_level so we cut before the specified level, not after
include_root = False
if root_level is not None and root_level > 0:
root_level -= 1
elif root_level is not None and root_level == 0:
include_root = True
for node in nodes:
if root_level is None:
if node.selected:
# if no root_level specified, set it to the selected nodes level
root_level = node.level
# is this the ancestor of current selected node at the root level?
is_root_ancestor = (node.ancestor and node.level == root_level)
# is a node selected on the root_level specified
root_selected = (node.selected and node.level == root_level)
if is_root_ancestor or root_selected:
cut_after(node, levels, [])
children = node.children
for child in children:
if child.sibling:
cut_after(child, nephews, [])
# if root_level was 0 we need to give the menu the entire tree
# not just the children
if include_root:
children = menu_pool.apply_modifiers([node], request, post_cut=True)
else:
children = menu_pool.apply_modifiers(children, request, post_cut=True)
context['children'] = children
context['template'] = template
context['from_level'] = 0
context['to_level'] = 0
context['extra_inactive'] = 0
context['extra_active'] = 0
return context
register.tag(ShowSubMenu)
class ShowBreadcrumb(InclusionTag):
"""
Shows the breadcrumb from the node that has the same url as the current request
- start level: after which level should the breadcrumb start? 0=home
- template: template used to render the breadcrumb
"""
name = 'show_breadcrumb'
template = 'menu/dummy.html'
options = Options(
Argument('start_level', default=0, required=False),
Argument('template', default='menu/breadcrumb.html', required=False),
Argument('only_visible', default=True, required=False),
)
def get_context(self, context, start_level, template, only_visible):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'cms/content.html'}
if not (isinstance(start_level, int) or start_level.isdigit()):
only_visible = template
template = start_level
start_level = 0
try:
only_visible = bool(int(only_visible))
except:
only_visible = bool(only_visible)
ancestors = []
nodes = menu_pool.get_nodes(request, breadcrumb=True)
# Find home
home = None
root_url = unquote(reverse("pages-root"))
home = next((node for node in nodes if node.get_absolute_url() == root_url), None)
# Find selected
selected = None
selected = next((node for node in nodes if node.selected), None)
if selected and selected != home:
node = selected
while node:
if node.visible or not only_visible:
ancestors.append(node)
node = node.parent
if not ancestors or (ancestors and ancestors[-1] != home) and home:
ancestors.append(home)
ancestors.reverse()
if len(ancestors) >= start_level:
ancestors = ancestors[start_level:]
else:
ancestors = []
context['ancestors'] = ancestors
context['template'] = template
return context
register.tag(ShowBreadcrumb)
def _raw_language_marker(language, lang_code):
return language
def _native_language_marker(language, lang_code):
with force_language(lang_code):
return force_text(ugettext(language))
def _current_language_marker(language, lang_code):
return force_text(ugettext(language))
def _short_language_marker(language, lang_code):
return lang_code
MARKERS = {
'raw': _raw_language_marker,
'native': _native_language_marker,
'current': _current_language_marker,
'short': _short_language_marker,
}
class LanguageChooser(InclusionTag):
"""
Displays a language chooser
- template: template used to render the language chooser
"""
name = 'language_chooser'
template = 'menu/dummy.html'
options = Options(
Argument('template', default=NOT_PROVIDED, required=False),
Argument('i18n_mode', default='raw', required=False),
)
def get_context(self, context, template, i18n_mode):
if template in MARKERS:
_tmp = template
if i18n_mode not in MARKERS:
template = i18n_mode
else:
template = NOT_PROVIDED
i18n_mode = _tmp
if template is NOT_PROVIDED:
template = "menu/language_chooser.html"
if not i18n_mode in MARKERS:
i18n_mode = 'raw'
if 'request' not in context:
# If there's an exception (500), default context_processors may not be called.
return {'template': 'cms/content.html'}
marker = MARKERS[i18n_mode]
current_lang = get_language()
site = Site.objects.get_current()
languages = []
for lang in get_language_objects(site.pk):
if lang.get('public', True):
languages.append((lang['code'], marker(lang['name'], lang['code'])))
context['languages'] = languages
context['current_language'] = current_lang
context['template'] = template
return context
register.tag(LanguageChooser)
class PageLanguageUrl(InclusionTag):
"""
Displays the url of the current page in the defined language.
You can set a language_changer function with the set_language_changer function in the utils.py if there is no page.
This is needed if you have slugs in more than one language.
"""
name = 'page_language_url'
template = 'cms/content.html'
options = Options(
Argument('lang'),
)
def get_context(self, context, lang):
try:
# If there's an exception (500), default context_processors may not be called.
request = context['request']
except KeyError:
return {'template': 'cms/content.html'}
if hasattr(request, "_language_changer"):
try:
url = request._language_changer(lang)
except NoReverseMatch:
url = DefaultLanguageChanger(request)(lang)
else:
# use the default language changer
url = DefaultLanguageChanger(request)(lang)
return {'content': url}
register.tag(PageLanguageUrl)
| bsd-3-clause | 1,171,679,372,156,920,800 | 3,543,924,523,882,424,300 | 34.42615 | 119 | 0.606452 | false |
home-assistant/home-assistant | homeassistant/components/skybell/camera.py | 27 | 2818 | """Camera support for the Skybell HD Doorbell."""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from . import DOMAIN as SKYBELL_DOMAIN, SkybellDevice
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=90)
IMAGE_AVATAR = "avatar"
IMAGE_ACTIVITY = "activity"
CONF_ACTIVITY_NAME = "activity_name"
CONF_AVATAR_NAME = "avatar_name"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=[IMAGE_AVATAR]): vol.All(
cv.ensure_list, [vol.In([IMAGE_AVATAR, IMAGE_ACTIVITY])]
),
vol.Optional(CONF_ACTIVITY_NAME): cv.string,
vol.Optional(CONF_AVATAR_NAME): cv.string,
}
)
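# Illustrative configuration.yaml sketch for this platform (values are
# placeholders):
#
#   camera:
#     - platform: skybell
#       monitored_conditions:
#         - avatar
#         - activity
#       avatar_name: "Last visitor"
#       activity_name: "Last activity"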
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform for a Skybell device."""
cond = config[CONF_MONITORED_CONDITIONS]
names = {}
names[IMAGE_ACTIVITY] = config.get(CONF_ACTIVITY_NAME)
names[IMAGE_AVATAR] = config.get(CONF_AVATAR_NAME)
skybell = hass.data.get(SKYBELL_DOMAIN)
sensors = []
for device in skybell.get_devices():
for camera_type in cond:
sensors.append(SkybellCamera(device, camera_type, names.get(camera_type)))
add_entities(sensors, True)
class SkybellCamera(SkybellDevice, Camera):
"""A camera implementation for Skybell devices."""
def __init__(self, device, camera_type, name=None):
"""Initialize a camera for a Skybell device."""
self._type = camera_type
SkybellDevice.__init__(self, device)
Camera.__init__(self)
if name is not None:
self._name = f"{self._device.name} {name}"
else:
self._name = self._device.name
self._url = None
self._response = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def image_url(self):
"""Get the camera image url based on type."""
if self._type == IMAGE_ACTIVITY:
return self._device.activity_image
return self._device.image
def camera_image(self):
"""Get the latest camera image."""
super().update()
if self._url != self.image_url:
self._url = self.image_url
try:
self._response = requests.get(self._url, stream=True, timeout=10)
except requests.HTTPError as err:
_LOGGER.warning("Failed to get camera image: %s", err)
self._response = None
if not self._response:
return None
return self._response.content
| apache-2.0 | 1,507,789,284,554,345,000 | 1,565,693,329,135,666,000 | 28.978723 | 86 | 0.636267 | false |
chdecultot/erpnext | erpnext/erpnext_integrations/utils.py | 5 | 1165 | import frappe
from frappe import _
import base64, hashlib, hmac
from six.moves.urllib.parse import urlparse
def validate_webhooks_request(doctype, hmac_key, secret_key='secret'):
def innerfn(fn):
settings = frappe.get_doc(doctype)
if frappe.request and settings and settings.get(secret_key) and not frappe.flags.in_test:
sig = base64.b64encode(
hmac.new(
settings.get(secret_key).encode('utf8'),
frappe.request.data,
hashlib.sha256
).digest()
)
if frappe.request.data and \
frappe.get_request_header(hmac_key) and \
not sig == bytes(frappe.get_request_header(hmac_key).encode()):
frappe.throw(_("Unverified Webhook Data"))
frappe.set_user(settings.modified_by)
return fn
return innerfn
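# Illustrative sketch of the sender side (placeholder secret and payload): the
# header named by `hmac_key` must carry
#     base64.b64encode(hmac.new(b"secret", payload, hashlib.sha256).digest())
# computed over the raw request body, which is what the check above recomputes.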
def get_webhook_address(connector_name, method, exclude_uri=False):
endpoint = "erpnext.erpnext_integrations.connectors.{0}.{1}".format(connector_name, method)
if exclude_uri:
return endpoint
try:
url = frappe.request.url
except RuntimeError:
url = "http://localhost:8000"
server_url = '{uri.scheme}://{uri.netloc}/api/method/{endpoint}'.format(uri=urlparse(url), endpoint=endpoint)
return server_url | gpl-3.0 | 6,721,628,062,439,105,000 | -2,387,390,531,904,086,500 | 26.761905 | 110 | 0.716738 | false |
jdahlin/stoq-wubi | src/bittorrent/Uploader.py | 12 | 8059 | # Written by Bram Cohen
# see LICENSE.txt for license information
from CurrentRateMeasure import Measure
class Upload:
def __init__(self, connection, choker, storage,
max_slice_length, max_rate_period, fudge):
self.connection = connection
self.choker = choker
self.storage = storage
self.max_slice_length = max_slice_length
self.max_rate_period = max_rate_period
self.choked = True
self.interested = False
self.buffer = []
self.measure = Measure(max_rate_period, fudge)
if storage.do_I_have_anything():
connection.send_bitfield(storage.get_have_list())
def got_not_interested(self):
if self.interested:
self.interested = False
del self.buffer[:]
self.choker.not_interested(self.connection)
def got_interested(self):
if not self.interested:
self.interested = True
self.choker.interested(self.connection)
def flushed(self):
while len(self.buffer) > 0 and self.connection.is_flushed():
index, begin, length = self.buffer[0]
del self.buffer[0]
piece = self.storage.get_piece(index, begin, length)
if piece is None:
self.connection.close()
return
self.measure.update_rate(len(piece))
self.connection.send_piece(index, begin, piece)
def got_request(self, index, begin, length):
if not self.interested or length > self.max_slice_length:
self.connection.close()
return
if not self.choked:
self.buffer.append((index, begin, length))
self.flushed()
def got_cancel(self, index, begin, length):
try:
self.buffer.remove((index, begin, length))
except ValueError:
pass
def choke(self):
if not self.choked:
self.choked = True
del self.buffer[:]
self.connection.send_choke()
def unchoke(self):
if self.choked:
self.choked = False
self.connection.send_unchoke()
def is_choked(self):
return self.choked
def is_interested(self):
return self.interested
def has_queries(self):
return len(self.buffer) > 0
def get_rate(self):
return self.measure.get_rate()
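# Note on the flow above: got_request() queues (index, begin, length) slices
# while the peer is unchoked and interested, flushed() drains that queue whenever
# the transport can accept more data, and choke()/unchoke() are driven by the
# Choker to gate uploads. The Dummy* classes below exercise exactly this protocol.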
class DummyConnection:
def __init__(self, events):
self.events = events
self.flushed = False
def send_bitfield(self, bitfield):
self.events.append(('bitfield', bitfield))
def is_flushed(self):
return self.flushed
def close(self):
self.events.append('closed')
def send_piece(self, index, begin, piece):
self.events.append(('piece', index, begin, piece))
def send_choke(self):
self.events.append('choke')
def send_unchoke(self):
self.events.append('unchoke')
class DummyChoker:
def __init__(self, events):
self.events = events
def interested(self, connection):
self.events.append('interested')
def not_interested(self, connection):
self.events.append('not interested')
class DummyStorage:
def __init__(self, events):
self.events = events
def do_I_have_anything(self):
self.events.append('do I have')
return True
def get_have_list(self):
self.events.append('get have list')
return [False, True]
def get_piece(self, index, begin, length):
self.events.append(('get piece', index, begin, length))
if length == 4:
return None
return 'a' * length
def test_skip_over_choke():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
assert u.is_choked()
assert not u.is_interested()
u.got_interested()
assert u.is_interested()
u.got_request(0, 0, 3)
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested']
def test_bad_piece():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
assert u.is_choked()
assert not u.is_interested()
u.got_interested()
assert u.is_interested()
u.unchoke()
assert not u.is_choked()
u.got_request(0, 0, 4)
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested', 'unchoke',
('get piece', 0, 0, 4), 'closed']
def test_still_rejected_after_unchoke():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
assert u.is_choked()
assert not u.is_interested()
u.got_interested()
assert u.is_interested()
u.unchoke()
assert not u.is_choked()
u.got_request(0, 0, 3)
u.choke()
u.unchoke()
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested', 'unchoke',
'choke', 'unchoke']
def test_sends_when_flushed():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
u.got_request(0, 1, 3)
dco.flushed = True
u.flushed()
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested',
('get piece', 0, 1, 3), ('piece', 0, 1, 'aaa')]
def test_sends_immediately():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
dco.flushed = True
u.got_request(0, 1, 3)
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested',
('get piece', 0, 1, 3), ('piece', 0, 1, 'aaa')]
def test_cancel():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
u.got_request(0, 1, 3)
u.got_cancel(0, 1, 3)
u.got_cancel(0, 1, 2)
u.flushed()
dco.flushed = True
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested']
def test_clears_on_not_interested():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.unchoke()
u.got_interested()
u.got_request(0, 1, 3)
u.got_not_interested()
dco.flushed = True
u.flushed()
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'unchoke', 'interested',
'not interested']
def test_close_when_sends_on_not_interested():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.got_request(0, 1, 3)
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'closed']
def test_close_over_max_length():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
u = Upload(dco, dch, ds, 100, 20, 5)
u.got_interested()
u.got_request(0, 1, 101)
assert events == ['do I have', 'get have list',
('bitfield', [False, True]), 'interested', 'closed']
def test_no_bitfield_on_start_empty():
events = []
dco = DummyConnection(events)
dch = DummyChoker(events)
ds = DummyStorage(events)
ds.do_I_have_anything = lambda: False
u = Upload(dco, dch, ds, 100, 20, 5)
assert events == []
| gpl-2.0 | 9,016,510,374,265,289,000 | 8,432,363,529,746,441,000 | 28.093863 | 68 | 0.584936 | false |
reinout/django | django/core/files/temp.py | 28 | 2501 | """
The temp module provides a NamedTemporaryFile that can be reopened in the same
process on any platform. Most platforms use the standard Python
tempfile.NamedTemporaryFile class, but Windows users are given a custom class.
This is needed because the Python implementation of NamedTemporaryFile uses the
O_TEMPORARY flag under Windows, which prevents the file from being reopened
if the same flag is not provided [1][2]. Note that this does not address the
more general issue of opening a file for writing and reading in multiple
processes in a manner that works across platforms.
The custom version of NamedTemporaryFile doesn't support the same keyword
arguments available in tempfile.NamedTemporaryFile.
1: https://mail.python.org/pipermail/python-list/2005-December/336957.html
2: http://bugs.python.org/issue14243
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that supports reopening of the
temporary file in Windows.
Unlike tempfile.NamedTemporaryFile from the standard library,
__init__() doesn't support the 'delete', 'buffering', 'encoding', or
'newline' keyword arguments.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='', dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except OSError:
pass
def __del__(self):
self.close()
def __enter__(self):
self.file.__enter__()
return self
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
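# Illustrative usage (a sketch; the module is a drop-in for tempfile):
#
#     from django.core.files.temp import NamedTemporaryFile
#     with NamedTemporaryFile(suffix='.txt') as tmp:
#         tmp.write(b'content')
#
# On Windows the custom class above lets the file be reopened by name while it
# is still open, unlike tempfile.NamedTemporaryFile.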
| bsd-3-clause | -3,524,227,743,421,770,000 | -6,795,918,361,961,818,000 | 32.797297 | 83 | 0.639744 | false |
kennethlove/django | tests/regressiontests/inline_formsets/tests.py | 6 | 6162 | from __future__ import absolute_import, unicode_literals
from django.forms.models import inlineformset_factory
from django.test import TestCase
from .models import Poet, Poem, School, Parent, Child
class DeletionTests(TestCase):
def test_deletion(self):
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': str(poem.pk),
'poem_set-0-poet': str(poet.pk),
'poem_set-0-name': 'test',
'poem_set-0-DELETE': 'on',
}
formset = PoemFormSet(data, instance=poet)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poem.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '0',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': '',
'poem_set-0-poem': '1',
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True)
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': unicode(poem.id),
'poem_set-0-poem': unicode(poem.id),
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_save_new(self):
"""
Make sure inlineformsets respect commit=False
regression for #10750
"""
# exclude some required field from the forms
ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
school = School.objects.create(name='test')
mother = Parent.objects.create(name='mother')
father = Parent.objects.create(name='father')
data = {
'child_set-TOTAL_FORMS': '1',
'child_set-INITIAL_FORMS': '0',
'child_set-MAX_NUM_FORMS': '0',
'child_set-0-name': 'child',
}
formset = ChildFormSet(data, instance=school)
self.assertEqual(formset.is_valid(), True)
objects = formset.save(commit=False)
for obj in objects:
obj.mother = mother
obj.father = father
obj.save()
self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
def test_inline_formset_factory(self):
"""
These should both work without a problem.
"""
inlineformset_factory(Parent, Child, fk_name='mother')
inlineformset_factory(Parent, Child, fk_name='father')
def test_exception_on_unspecified_foreign_key(self):
"""
Child has two ForeignKeys to Parent, so if we don't specify which one
to use for the inline formset, we should get an exception.
"""
self.assertRaisesRegexp(Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has more than 1 ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child
)
def test_fk_name_not_foreign_key_field_from_child(self):
"""
If we specify fk_name, but it isn't a ForeignKey from the child model
to the parent model, we should get an exception.
"""
self.assertRaises(Exception,
"fk_name 'school' is not a ForeignKey to <class 'regressiontests.inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child, fk_name='school'
)
def test_non_foreign_key_field(self):
"""
If the field specified in fk_name is not a ForeignKey, we should get an
exception.
"""
self.assertRaisesRegexp(Exception,
"<class 'regressiontests.inline_formsets.models.Child'> has no field named 'test'",
inlineformset_factory, Parent, Child, fk_name='test'
)
def test_any_iterable_allowed_as_argument_to_exclude(self):
# Regression test for #9171.
inlineformset_factory(
Parent, Child, exclude=['school'], fk_name='mother'
)
inlineformset_factory(
Parent, Child, exclude=('school',), fk_name='mother'
)
| bsd-3-clause | -6,022,884,876,794,369,000 | 6,202,665,056,365,425,000 | 38 | 155 | 0.596884 | false |
rouge8/pip | tests/functional/test_download.py | 1 | 21568 | import os.path
import shutil
import textwrap
import pytest
from pip._internal.cli.status_codes import ERROR
from tests.lib.path import Path
def fake_wheel(data, wheel_path):
shutil.copy(
data.packages.joinpath('simple.dist-0.1-py2.py3-none-any.whl'),
data.packages.joinpath(wheel_path),
)
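# Note: the wheel filename encodes the compatibility tags
# (name-version-python-abi-platform), so copying simple.dist's wheel to e.g.
# "fake-1.0-py2.py3-none-linux_x86_64.whl" is enough for the finder to treat it
# as a linux_x86_64 build in the tests below.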
@pytest.mark.network
def test_download_if_requested(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip(
'download', '-d', 'pip_downloads', 'INITools==0.1'
)
assert Path('scratch') / 'pip_downloads' / 'INITools-0.1.tar.gz' \
in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_setuptools(script):
"""
It should download (in the scratch path) and not install if requested.
"""
result = script.pip('download', 'setuptools')
setuptools_prefix = str(Path('scratch') / 'setuptools')
assert any(
path.startswith(setuptools_prefix) for path in result.files_created
)
def test_download_wheel(script, data):
"""
Test using "pip download" to download a *.whl archive.
"""
result = script.pip(
'download',
'--no-index',
'-f', data.packages,
'-d', '.', 'meta'
)
assert (
Path('scratch') / 'meta-1.0-py2.py3-none-any.whl'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
@pytest.mark.network
def test_single_download_from_requirements_file(script):
"""
    It should support downloading (into the scratch path) from PyPI via a
    requirements file
"""
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_should_download_dependencies(script):
"""
It should download dependencies (in the scratch path)
"""
result = script.pip(
'download', 'Paste[openid]==1.7.5.1', '-d', '.'
)
assert Path('scratch') / 'Paste-1.7.5.1.tar.gz' in result.files_created
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert script.site_packages / 'openid' not in result.files_created
def test_download_wheel_archive(script, data):
"""
It should download a wheel archive path
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--no-deps'
)
assert Path('scratch') / wheel_filename in result.files_created
def test_download_should_download_wheel_deps(script, data):
"""
    It should download dependencies for wheels (in the scratch path)
"""
wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
dep_filename = 'translationstring-1.1.tar.gz'
wheel_path = '/'.join((data.find_links, wheel_filename))
result = script.pip(
'download', wheel_path,
'-d', '.', '--find-links', data.find_links, '--no-index'
)
assert Path('scratch') / wheel_filename in result.files_created
assert Path('scratch') / dep_filename in result.files_created
@pytest.mark.network
def test_download_should_skip_existing_files(script):
"""
It should not download files already existing in the scratch dir
"""
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
INITools==0.1
"""))
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
)
assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
assert script.site_packages / 'initools' not in result.files_created
# adding second package to test-req.txt
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
INITools==0.1
python-openid==2.2.5
"""))
# only the second package should be downloaded
result = script.pip(
'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
)
openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
assert any(
path.startswith(openid_tarball_prefix) for path in result.files_created
)
assert Path('scratch') / 'INITools-0.1.tar.gz' not in result.files_created
assert script.site_packages / 'initools' not in result.files_created
assert script.site_packages / 'openid' not in result.files_created
@pytest.mark.network
def test_download_vcs_link(script):
"""
It should allow -d flag for vcs links, regression test for issue #798.
"""
result = script.pip(
'download', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git'
)
assert (
Path('scratch') / 'pip-test-package-0.1.1.zip'
in result.files_created
)
assert script.site_packages / 'piptestpackage' not in result.files_created
def test_only_binary_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_no_deps_set_then_download_specific_platform(script, data):
"""
Confirm that specifying an interpreter/platform constraint
is allowed when ``--no-deps`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--no-deps',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
def test_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--no-deps`` or ``--only-binary=:all:`` is set.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_no_binary_set_then_download_specific_platform_fails(script, data):
"""
Confirm that specifying an interpreter/platform constraint
enforces that ``--only-binary=:all:`` is set without ``--no-binary``.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--no-binary=fake',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
expect_error=True,
)
assert '--only-binary=:all:' in result.stderr
def test_download_specify_platform(script, data):
"""
Test using "pip download --platform" to download a .whl archive
supported for a specific platform
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
# Confirm that universal wheels are returned even for specific
# platforms.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_9_x86_64',
'fake'
)
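    # The universal wheel is platform-independent, so the macOS-targeted
    # download above also succeeds without expect_error.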
data.reset()
fake_wheel(data, 'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl')
fake_wheel(data, 'fake-2.0-py2.py3-none-linux_x86_64.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_10_x86_64',
'fake'
)
assert (
Path('scratch') /
'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl'
in result.files_created
)
# OSX platform wheels are not backward-compatible.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'macosx_10_8_x86_64',
'fake',
expect_error=True,
)
# No linux wheel provided for this version.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==1',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake==2'
)
assert (
Path('scratch') / 'fake-2.0-py2.py3-none-linux_x86_64.whl'
in result.files_created
)
class TestDownloadPlatformManylinuxes(object):
"""
"pip download --platform" downloads a .whl archive supported for
manylinux platforms.
"""
@pytest.mark.parametrize("platform", [
"linux_x86_64",
"manylinux1_x86_64",
"manylinux2010_x86_64",
])
def test_download_universal(self, platform, script, data):
"""
Universal wheels are returned even for specific platforms.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', platform,
'fake',
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
@pytest.mark.parametrize("wheel_abi,platform", [
("manylinux1_x86_64", "manylinux1_x86_64"),
("manylinux1_x86_64", "manylinux2010_x86_64"),
("manylinux2010_x86_64", "manylinux2010_x86_64"),
])
def test_download_compatible_manylinuxes(
self, wheel_abi, platform, script, data,
):
"""
Earlier manylinuxes are compatible with later manylinuxes.
"""
wheel = 'fake-1.0-py2.py3-none-{}.whl'.format(wheel_abi)
fake_wheel(data, wheel)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', platform,
'fake',
)
assert Path('scratch') / wheel in result.files_created
def test_explicit_platform_only(self, data, script):
"""
When specifying the platform, manylinux1 needs to be the
explicit platform--it won't ever be added to the compatible
tags.
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-linux_x86_64.whl')
script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--platform', 'linux_x86_64',
'fake',
)
def test_download__python_version(script, data):
"""
Test using "pip download --python-version" to download a .whl archive
supported for a specific interpreter
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '27',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '33',
'fake'
)
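    # The py2.py3 universal wheel satisfies each interpreter version requested
    # above, so all of these commands complete without error.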
data.reset()
fake_wheel(data, 'fake-1.0-py2-none-any.whl')
fake_wheel(data, 'fake-2.0-py3-none-any.whl')
# No py3 provided for version 1.
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake==1.0',
expect_error=True,
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '26',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '3',
'fake'
)
assert (
Path('scratch') / 'fake-2.0-py3-none-any.whl'
in result.files_created
)
def make_wheel_with_python_requires(script, package_name, python_requires):
"""
Create a wheel using the given python_requires.
:return: the path to the wheel file.
"""
package_dir = script.scratch_path / package_name
package_dir.mkdir()
text = textwrap.dedent("""\
from setuptools import setup
setup(name='{}',
python_requires='{}',
version='1.0')
""").format(package_name, python_requires)
package_dir.joinpath('setup.py').write_text(text)
script.run(
'python', 'setup.py', 'bdist_wheel', '--universal', cwd=package_dir,
)
file_name = '{}-1.0-py2.py3-none-any.whl'.format(package_name)
return package_dir / 'dist' / file_name
def test_download__python_version_used_for_python_requires(
script, data, with_wheel,
):
"""
Test that --python-version is used for the Requires-Python check.
"""
wheel_path = make_wheel_with_python_requires(
script, 'mypackage', python_requires='==3.2',
)
wheel_dir = os.path.dirname(wheel_path)
def make_args(python_version):
return [
'download', '--no-index', '--find-links', wheel_dir,
'--only-binary=:all:',
'--dest', '.',
'--python-version', python_version,
'mypackage==1.0',
]
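    # '33' maps to Python 3.3, which falls outside the wheel's '==3.2'
    # Requires-Python constraint, so this download should fail.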
args = make_args('33')
result = script.pip(*args, expect_error=True)
expected_err = (
"ERROR: Package 'mypackage' requires a different Python: "
"3.3.0 not in '==3.2'"
)
assert expected_err in result.stderr, 'stderr: {}'.format(result.stderr)
# Now try with a --python-version that satisfies the Requires-Python.
args = make_args('32')
script.pip(*args) # no exception
def test_download_specify_abi(script, data):
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--abi', 'fake_abi',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
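    # The 'none'-ABI universal wheel is compatible with any requested ABI,
    # so the following two downloads succeed without further asserts.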
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--abi', 'none',
'fake'
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--abi', 'cp27m',
'fake',
)
data.reset()
fake_wheel(data, 'fake-1.0-fk2-fakeabi-fake_platform.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--python-version', '2',
'--implementation', 'fk',
'--platform', 'fake_platform',
'--abi', 'fakeabi',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk2-fakeabi-fake_platform.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--platform', 'fake_platform',
'--abi', 'none',
'fake',
expect_error=True,
)
def test_download_specify_implementation(script, data):
"""
Test using "pip download --abi" to download a .whl archive
supported for a specific abi
"""
fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-fk2.fk3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk2.fk3-none-any.whl'
in result.files_created
)
data.reset()
fake_wheel(data, 'fake-1.0-fk3-none-any.whl')
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--python-version', '3',
'fake'
)
assert (
Path('scratch') / 'fake-1.0-fk3-none-any.whl'
in result.files_created
)
result = script.pip(
'download', '--no-index', '--find-links', data.find_links,
'--only-binary=:all:',
'--dest', '.',
'--implementation', 'fk',
'--python-version', '2',
'fake',
expect_error=True,
)
def test_download_exit_status_code_when_no_requirements(script):
"""
Test download exit status code when no requirements specified
"""
result = script.pip('download', expect_error=True)
assert (
"You must give at least one requirement to download" in result.stderr
)
assert result.returncode == ERROR
def test_download_exit_status_code_when_blank_requirements_file(script):
"""
Test download exit status code when blank requirements file specified
"""
script.scratch_path.joinpath("blank.txt").write_text("\n")
script.pip('download', '-r', 'blank.txt')
def test_download_prefer_binary_when_tarball_higher_than_wheel(script, data):
fake_wheel(data, 'source-0.8-py2.py3-none-any.whl')
result = script.pip(
'download',
'--prefer-binary',
'--no-index',
'-f', data.packages,
'-d', '.', 'source'
)
assert (
Path('scratch') / 'source-0.8-py2.py3-none-any.whl'
in result.files_created
)
assert (
Path('scratch') / 'source-1.0.tar.gz'
not in result.files_created
)
def test_download_prefer_binary_when_wheel_doesnt_satisfy_req(script, data):
fake_wheel(data, 'source-0.8-py2.py3-none-any.whl')
script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
source>0.9
"""))
result = script.pip(
'download',
'--prefer-binary',
'--no-index',
'-f', data.packages,
'-d', '.',
'-r', script.scratch_path / 'test-req.txt'
)
assert (
Path('scratch') / 'source-1.0.tar.gz'
in result.files_created
)
assert (
Path('scratch') / 'source-0.8-py2.py3-none-any.whl'
not in result.files_created
)
def test_download_prefer_binary_when_only_tarball_exists(script, data):
result = script.pip(
'download',
'--prefer-binary',
'--no-index',
'-f', data.packages,
'-d', '.', 'source'
)
assert (
Path('scratch') / 'source-1.0.tar.gz'
in result.files_created
)
| mit | 2,973,209,569,921,746,400 | 3,307,267,530,576,609,000 | 28.464481 | 79 | 0.562175 | false |
TensorVision/MediSeg | eval.py | 1 | 3028 | """Evaluation of the Model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
import os
import utils
import logging
import sys
import imp
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_data', 'test',
"""Either 'test' or 'train_eval'.""")
# TODO: Iterate over all possible Values
# Write Values to Tensorboard
def evaluate(train_dir):
"""Loads the model and runs evaluation
"""
target_dir = os.path.join(train_dir, "model_files")
params = imp.load_source("params", os.path.join(target_dir, "params.py"))
data_input = imp.load_source("input", os.path.join(target_dir, "input.py"))
network = imp.load_source("network", os.path.join(target_dir, "network.py"))
with tf.Graph().as_default():
# Retrieve images and labels
eval_data = FLAGS.eval_data == 'test'
images, labels = data_input.inputs(eval_data=eval_data, data_dir=utils.cfg.data_dir,
batch_size=params.batch_size)
        # Generate a placeholder for the dropout keep probability.
keep_prob = utils.placeholder_inputs(params.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = network.inference(images, keep_prob)
# Add to the Graph the Ops for loss calculation.
loss = network.loss(logits, labels)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = network.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
init = tf.initialize_all_variables()
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
ckpt = tf.train.get_checkpoint_state(train_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("No checkpoints found! ")
exit(1)
print("Doing Evaluation with lots of data")
utils.do_eval(sess=sess,
eval_correct=eval_correct,
keep_prob=keep_prob,
num_examples=params.num_examples_per_epoch_for_eval,
params=params,
name="eval")
def main(_):
train_dir = utils.get_train_dir()
evaluate(train_dir)
if __name__ == '__main__':
tf.app.run()
| mit | 3,656,827,405,781,833,700 | 6,114,687,448,662,701,000 | 25.561404 | 89 | 0.652906 | false |
dendisuhubdy/tensorflow | tensorflow/python/platform/resource_loader.py | 23 | 4143 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resource management library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os as _os
import sys as _sys
from tensorflow.python.util import tf_inspect as _inspect
from tensorflow.python.util.tf_export import tf_export
@tf_export('resource_loader.load_resource')
def load_resource(path):
"""Load the resource at given path, where path is relative to tensorflow/.
Args:
path: a string resource path relative to tensorflow/.
Returns:
The contents of that resource.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
tensorflow_root = (_os.path.join(
_os.path.dirname(__file__), _os.pardir, _os.pardir))
path = _os.path.join(tensorflow_root, path)
path = _os.path.abspath(path)
with open(path, 'rb') as f:
return f.read()
# pylint: disable=protected-access
@tf_export('resource_loader.get_data_files_path')
def get_data_files_path():
"""Get a direct path to the data files colocated with the script.
Returns:
The directory where files specified in data attribute of py_test
and py_binary are stored.
"""
return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
@tf_export('resource_loader.get_root_dir_with_all_resources')
def get_root_dir_with_all_resources():
"""Get a root directory containing all the data attributes in the build rule.
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary. Falls back to returning the same as get_data_files_path if it
fails to detect a bazel runfiles directory.
"""
script_dir = get_data_files_path()
# Create a history of the paths, because the data files are located relative
# to the repository root directory, which is directly under runfiles
# directory.
directories = [script_dir]
data_files_dir = ''
while True:
candidate_dir = directories[-1]
current_directory = _os.path.basename(candidate_dir)
if '.runfiles' in current_directory:
# Our file should never be directly under runfiles.
# If the history has only one item, it means we are directly inside the
# runfiles directory, something is wrong, fall back to the default return
# value, script directory.
if len(directories) > 1:
data_files_dir = directories[-2]
break
else:
new_candidate_dir = _os.path.dirname(candidate_dir)
# If we are at the root directory these two will be the same.
if new_candidate_dir == candidate_dir:
break
else:
directories.append(new_candidate_dir)
return data_files_dir or script_dir
@tf_export('resource_loader.get_path_to_datafile')
def get_path_to_datafile(path):
"""Get the path to the specified file in the data dependencies.
The path is relative to tensorflow/
Args:
path: a string resource path relative to tensorflow/
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
data_files_path = _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
return _os.path.join(data_files_path, path)
@tf_export('resource_loader.readahead_file_path')
def readahead_file_path(path, readahead='128M'): # pylint: disable=unused-argument
"""Readahead files not implemented; simply returns given path."""
return path
| apache-2.0 | 4,981,346,782,846,628,000 | 6,846,292,661,387,557,000 | 32.682927 | 83 | 0.700217 | false |
shaufi10/odoo | addons/base_import/tests/test_cases.py | 189 | 15021 | # -*- encoding: utf-8 -*-
import unittest2
from openerp.tests.common import TransactionCase
from .. import models
ID_FIELD = {
'id': 'id',
'name': 'id',
'string': "External ID",
'required': False,
'fields': [],
}
def make_field(name='value', string='unknown', required=False, fields=[]):
return [
ID_FIELD,
{'id': name, 'name': name, 'string': string, 'required': required, 'fields': fields},
]
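# For example, make_field() evaluates to:
#   [ID_FIELD, {'id': 'value', 'name': 'value', 'string': 'unknown',
#               'required': False, 'fields': []}]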
def sorted_fields(fields):
""" recursively sort field lists to ease comparison """
recursed = [dict(field, fields=sorted_fields(field['fields'])) for field in fields]
return sorted(recursed, key=lambda field: field['id'])
class BaseImportCase(TransactionCase):
def assertEqualFields(self, fields1, fields2):
self.assertEqual(sorted_fields(fields1), sorted_fields(fields2))
class test_basic_fields(BaseImportCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_base(self):
""" A basic field is not required """
self.assertEqualFields(self.get_fields('char'), make_field())
def test_required(self):
""" Required fields should be flagged (so they can be fill-required) """
self.assertEqualFields(self.get_fields('char.required'), make_field(required=True))
def test_readonly(self):
""" Readonly fields should be filtered out"""
self.assertEqualFields(self.get_fields('char.readonly'), [ID_FIELD])
def test_readonly_states(self):
""" Readonly fields with states should not be filtered out"""
self.assertEqualFields(self.get_fields('char.states'), make_field())
def test_readonly_states_noreadonly(self):
""" Readonly fields with states having nothing to do with
readonly should still be filtered out"""
self.assertEqualFields(self.get_fields('char.noreadonly'), [ID_FIELD])
def test_readonly_states_stillreadonly(self):
""" Readonly fields with readonly states leaving them readonly
always... filtered out"""
self.assertEqualFields(self.get_fields('char.stillreadonly'), [ID_FIELD])
def test_m2o(self):
""" M2O fields should allow import of themselves (name_get),
their id and their xid"""
self.assertEqualFields(self.get_fields('m2o'), make_field(fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]))
def test_m2o_required(self):
""" If an m2o field is required, its three sub-fields are
required as well (the client has to handle that: requiredness
is id-based)
"""
self.assertEqualFields(self.get_fields('m2o.required'), make_field(required=True, fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': []},
]))
class test_o2m(BaseImportCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_shallow(self):
self.assertEqualFields(self.get_fields('o2m'), make_field(fields=[
ID_FIELD,
# FIXME: should reverse field be ignored?
{'id': 'parent_id', 'name': 'parent_id', 'string': 'unknown', 'required': False, 'fields': [
{'id': 'parent_id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'parent_id', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]},
{'id': 'value', 'name': 'value', 'string': 'unknown', 'required': False, 'fields': []},
]))
class test_match_headers_single(TransactionCase):
def test_match_by_name(self):
match = self.registry('base_import.import')._match_header(
'f0', [{'name': 'f0'}], {})
self.assertEqual(match, [{'name': 'f0'}])
def test_match_by_string(self):
match = self.registry('base_import.import')._match_header(
'some field', [{'name': 'bob', 'string': "Some Field"}], {})
self.assertEqual(match, [{'name': 'bob', 'string': "Some Field"}])
def test_nomatch(self):
match = self.registry('base_import.import')._match_header(
'should not be', [{'name': 'bob', 'string': "wheee"}], {})
self.assertEqual(match, [])
def test_recursive_match(self):
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f1', [f], {})
self.assertEqual(match, [f, f['fields'][1]])
def test_recursive_nomatch(self):
""" Match first level, fail to match second level
"""
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f2', [f], {})
self.assertEqual(match, [])
class test_match_headers_multiple(TransactionCase):
def test_noheaders(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
[], [], {}),
(None, None)
)
def test_nomatch(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter([
['foo', 'bar', 'baz', 'qux'],
['v1', 'v2', 'v3', 'v4'],
]),
[],
{'headers': True}),
(
['foo', 'bar', 'baz', 'qux'],
dict.fromkeys(range(4))
)
)
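        # With headers enabled but no fields to match against, the header row
        # is returned as-is and every column index maps to None (unmatched).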
def test_mixed(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter(['foo bar baz qux/corge'.split()]),
[
{'name': 'bar', 'string': 'Bar'},
{'name': 'bob', 'string': 'Baz'},
{'name': 'qux', 'string': 'Qux', 'fields': [
{'name': 'corge', 'fields': []},
]}
],
{'headers': True}),
(['foo', 'bar', 'baz', 'qux/corge'], {
0: None,
1: ['bar'],
2: ['bob'],
3: ['qux', 'corge'],
})
)
class test_preview(TransactionCase):
def make_import(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'res.users',
'file': u"로그인,언어\nbob,1\n".encode('euc_kr'),
})
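        # The header row holds the Korean words for "login" and "language",
        # encoded as EUC-KR, so previews must be given the correct encoding.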
return Import, id
def test_encoding(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
})
self.assertTrue('error' in result)
def test_csv_errors(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': 'foo',
'separator': ',',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': 'bob',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
def test_success(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
'headers': True,
})
self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue'], 2: None})
self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
# Order depends on iteration order of fields_get
self.assertItemsEqual(result['fields'], [
ID_FIELD,
{'id': 'name', 'name': 'name', 'string': 'Name', 'required':False, 'fields': []},
{'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required':True, 'fields': []},
{'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required':False, 'fields': []},
])
self.assertEqual(result['preview'], [
['foo', '1', '2'],
['bar', '3', '4'],
['qux', '5', '6'],
])
# Ensure we only have the response fields we expect
self.assertItemsEqual(result.keys(), ['matches', 'headers', 'fields', 'preview'])
class test_convert_import_data(TransactionCase):
""" Tests conversion of base_import.import input into data which
can be fed to Model.import_data
"""
def test_all(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', 'somevalue', 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '1', '2'),
('bar', '3', '4'),
('qux', '5', '6'),
])
def test_filtered(self):
""" If ``False`` is provided as field mapping for a column,
that column should be removed from importable data
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('bar', '4'),
('qux', '6'),
])
def test_norow(self):
""" If a row is composed only of empty values (due to having
filtered out non-empty values from it), it should be removed
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
',3,\n'
',5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('', '6'),
])
def test_empty_rows(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value\n'
'foo,1\n'
'\n'
'bar,2\n'
' \n'
'\t \n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', 'somevalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'somevalue'])
self.assertItemsEqual(data, [
('foo', '1'),
('bar', '2'),
])
def test_nofield(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [],
{'quoting': '"', 'separator': ',', 'headers': True,})
def test_falsefields(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [False, False, False],
{'quoting': '"', 'separator': ',', 'headers': True,})
class test_failures(TransactionCase):
def test_big_attachments(self):
"""
Ensure big fields (e.g. b64-encoded image data) can be imported and
we're not hitting limits of the default CSV parser config
"""
import csv, cStringIO
from PIL import Image
im = Image.new('RGB', (1920, 1080))
fout = cStringIO.StringIO()
writer = csv.writer(fout, dialect=None)
writer.writerows([
['name', 'db_datas'],
['foo', im.tobytes().encode('base64')]
])
Import = self.env['base_import.import']
imp = Import.create({
'res_model': 'ir.attachment',
'file': fout.getvalue()
})
[results] = imp.do(
['name', 'db_datas'],
{'headers': True, 'separator': ',', 'quoting': '"'})
self.assertFalse(
results, "results should be empty on successful import")
| agpl-3.0 | -3,858,218,885,014,097,000 | 6,544,352,248,899,632,000 | 35.972906 | 115 | 0.50956 | false |
johndpope/tensorflow | tensorflow/python/debug/cli/debugger_cli_common_test.py | 101 | 45452 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Building Blocks of the TensorFlow Debugger CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import stat
import tempfile
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CommandLineExitTest(test_util.TensorFlowTestCase):
def testConstructionWithoutToken(self):
exit_exc = debugger_cli_common.CommandLineExit()
self.assertTrue(isinstance(exit_exc, Exception))
def testConstructionWithToken(self):
exit_exc = debugger_cli_common.CommandLineExit(exit_token={"foo": "bar"})
self.assertTrue(isinstance(exit_exc, Exception))
self.assertEqual({"foo": "bar"}, exit_exc.exit_token)
class RichTextLinesTest(test_util.TensorFlowTestCase):
def testRichTextLinesConstructorComplete(self):
# Test RichTextLines constructor.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual(2, len(screen_output.annotations))
self.assertEqual(2, screen_output.num_lines())
def testRichTextLinesConstructorWithInvalidType(self):
with self.assertRaisesRegexp(ValueError, "Unexpected type in lines"):
debugger_cli_common.RichTextLines(123)
def testRichTextLinesConstructorWithString(self):
# Test constructing a RichTextLines object with a string, instead of a list
# of strings.
screen_output = debugger_cli_common.RichTextLines(
"Roses are red",
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
self.assertEqual(1, len(screen_output.lines))
self.assertEqual(1, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.annotations))
def testRichLinesAppendRichLine(self):
rtl = debugger_cli_common.RichTextLines(
"Roses are red",
font_attr_segs={0: [(0, 5, "red")]})
rtl.append_rich_line(debugger_cli_common.RichLine("Violets are ") +
debugger_cli_common.RichLine("blue", "blue"))
self.assertEqual(2, len(rtl.lines))
self.assertEqual(2, len(rtl.font_attr_segs))
self.assertEqual(1, len(rtl.font_attr_segs[0]))
self.assertEqual(1, len(rtl.font_attr_segs[1]))
def testRichLineLenMethodWorks(self):
self.assertEqual(0, len(debugger_cli_common.RichLine()))
self.assertEqual(0, len(debugger_cli_common.RichLine("")))
self.assertEqual(1, len(debugger_cli_common.RichLine("x")))
self.assertEqual(6, len(debugger_cli_common.RichLine("x y z ", "blue")))
def testRichTextLinesConstructorIncomplete(self):
# Test RichTextLines constructor, with incomplete keyword arguments.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual({}, screen_output.annotations)
def testModifyRichTextLinesObject(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
self.assertEqual(2, len(screen_output.lines))
screen_output.lines.append("Sugar is sweet")
self.assertEqual(3, len(screen_output.lines))
def testMergeRichTextLines(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines(
["Lilies are white", "Sunflowers are yellow"],
font_attr_segs={0: [(0, 6, "white")],
1: [(0, 7, "yellow")]},
annotations={
"metadata": "foo",
0: "full spectrum",
1: "medium wavelength"
})
screen_output_1.extend(screen_output_2)
self.assertEqual(4, screen_output_1.num_lines())
self.assertEqual([
"Roses are red", "Violets are blue", "Lilies are white",
"Sunflowers are yellow"
], screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
2: [(0, 6, "white")],
3: [(0, 7, "yellow")]
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
2: [(0, 6, "white")],
3: [(0, 7, "yellow")]
}, screen_output_1.font_attr_segs)
self.assertEqual({
"metadata": "foo",
0: "longer wavelength",
1: "shorter wavelength",
2: "full spectrum",
3: "medium wavelength"
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptyOther(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines([])
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptySelf(self):
screen_output_1 = debugger_cli_common.RichTextLines([])
screen_output_2 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testAppendALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.append("Violets are blue", [(0, 7, "blue")])
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
def testPrependALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.prepend("Violets are blue", font_attr_segs=[(0, 7, "blue")])
self.assertEqual(["Violets are blue", "Roses are red"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 7, "blue")],
1: [(0, 5, "red")],
}, screen_output_1.font_attr_segs)
def testWriteToFileSucceeds(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = tempfile.mktemp()
screen_output.write_to_file(file_path)
with gfile.Open(file_path, "r") as f:
self.assertEqual("Roses are red\nViolets are blue\n", f.read())
# Clean up.
gfile.Remove(file_path)
def testAttemptToWriteToADirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
with self.assertRaises(Exception):
screen_output.write_to_file("/")
def testAttemptToWriteToFileInNonexistentDirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = os.path.join(tempfile.mkdtemp(), "foo", "bar.txt")
with self.assertRaises(Exception):
screen_output.write_to_file(file_path)
class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._intentional_error_msg = "Intentionally raised exception"
def _noop_handler(self, argv, screen_info=None):
# A handler that does nothing other than returning "Done."
return debugger_cli_common.RichTextLines(["Done."])
def _handler_raising_exception(self, argv, screen_info=None):
# A handler that intentionally raises an exception.
raise RuntimeError(self._intentional_error_msg)
def _handler_returning_wrong_type(self, argv, screen_info=None):
# A handler that returns a wrong type, instead of the correct type
# (RichTextLines).
return "Hello"
def _echo_screen_cols(self, argv, screen_info=None):
# A handler that uses screen_info.
return debugger_cli_common.RichTextLines(
["cols = %d" % screen_info["cols"]])
def _exiting_handler(self, argv, screen_info=None):
"""A handler that exits with an exit token."""
if argv:
exit_token = argv[0]
else:
exit_token = None
raise debugger_cli_common.CommandLineExit(exit_token=exit_token)
def testRegisterEmptyCommandPrefix(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempt to register an empty-string as a command prefix should trigger
# an exception.
with self.assertRaisesRegexp(ValueError, "Empty command prefix"):
registry.register_command_handler("", self._noop_handler, "")
def testRegisterAndInvokeHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
self.assertTrue(registry.is_registered("noop"))
self.assertFalse(registry.is_registered("beep"))
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
# Attempt to invoke an unregistered command prefix should trigger an
# exception.
with self.assertRaisesRegexp(ValueError, "No handler is registered"):
registry.dispatch_command("beep", [])
# Empty command prefix should trigger an exception.
with self.assertRaisesRegexp(ValueError, "Prefix is empty"):
registry.dispatch_command("", [])
def testExitingHandler(self):
"""Test that exit exception is correctly raised."""
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("exit", self._exiting_handler, "")
self.assertTrue(registry.is_registered("exit"))
exit_token = None
try:
registry.dispatch_command("exit", ["foo"])
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
self.assertEqual("foo", exit_token)
def testInvokeHandlerWithScreenInfo(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Register and invoke a command handler that uses screen_info.
registry.register_command_handler("cols", self._echo_screen_cols, "")
cmd_output = registry.dispatch_command(
"cols", [], screen_info={"cols": 100})
self.assertEqual(["cols = 100"], cmd_output.lines)
def testRegisterAndInvokeHandlerWithAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n", "NOOP"])
# is_registered() should work for full prefix and aliases.
self.assertTrue(registry.is_registered("noop"))
self.assertTrue(registry.is_registered("n"))
self.assertTrue(registry.is_registered("NOOP"))
cmd_output = registry.dispatch_command("n", [])
self.assertEqual(["Done."], cmd_output.lines)
cmd_output = registry.dispatch_command("NOOP", [])
self.assertEqual(["Done."], cmd_output.lines)
def testHandlerWithWrongReturnType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("wrong_return",
self._handler_returning_wrong_type, "")
# If the command handler fails to return a RichTextLines instance, an error
# should be triggered.
with self.assertRaisesRegexp(
ValueError,
"Return value from command handler.*is not None or a RichTextLines "
"instance"):
registry.dispatch_command("wrong_return", [])
def testRegisterDuplicateHandlers(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
# Registering the same command prefix more than once should trigger an
# exception.
with self.assertRaisesRegexp(
ValueError, "A handler is already registered for command prefix"):
registry.register_command_handler("noop", self._noop_handler, "")
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
def testRegisterDuplicateAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n"])
# Clash with existing alias.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["n"])
    # The name clash should have prevented the handler from being registered.
self.assertFalse(registry.is_registered("cols"))
# Aliases can also clash with command prefixes.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["noop"])
self.assertFalse(registry.is_registered("cols"))
def testDispatchHandlerRaisingException(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("raise_exception",
self._handler_raising_exception, "")
# The registry should catch and wrap exceptions that occur during command
# handling.
cmd_output = registry.dispatch_command("raise_exception", [])
# The error output contains a stack trace.
# So the line count should be >= 2.
self.assertGreater(len(cmd_output.lines), 2)
self.assertTrue(cmd_output.lines[0].startswith(
"Error occurred during handling of command"))
self.assertTrue(cmd_output.lines[1].endswith(self._intentional_error_msg))
def testRegisterNonCallableHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempt to register a non-callable handler should fail.
with self.assertRaisesRegexp(ValueError, "handler is not callable"):
registry.register_command_handler("non_callable", 1, "")
def testRegisterHandlerWithInvalidHelpInfoType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
with self.assertRaisesRegexp(ValueError, "help_info is not a str"):
registry.register_command_handler("noop", self._noop_handler, ["foo"])
def testGetHelpFull(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
help_lines = registry.get_help().lines
# The help info should list commands in alphabetically sorted order,
    # regardless of the order in which the commands are registered.
self.assertEqual("cols", help_lines[0])
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
self.assertFalse(help_lines[4])
self.assertFalse(help_lines[5])
# The default help command should appear in the help output.
self.assertEqual("help", help_lines[6])
self.assertEqual("noop", help_lines[12])
self.assertTrue(help_lines[13].endswith("Aliases: n, NOOP"))
self.assertFalse(help_lines[14])
self.assertTrue(help_lines[15].endswith("No operation."))
self.assertTrue(help_lines[16].endswith("I.e., do nothing."))
def testGetHelpSingleCommand(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help info for one of the two commands, using full prefix.
help_lines = registry.get_help("cols").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for one of the two commands, using alias.
help_lines = registry.get_help("c").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for a nonexistent command.
help_lines = registry.get_help("foo").lines
self.assertEqual("Invalid command prefix: \"foo\"", help_lines[0])
def testHelpCommandWithoutIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help for all commands.
output = registry.dispatch_command("help", [])
self.assertEqual(["cols", " Aliases: c", "",
" Show screen width in number of columns.", "", "",
"help", " Aliases: h", "", " Print this help message.",
"", "", "noop", " Aliases: n, NOOP", "",
" No operation.", " I.e., do nothing.", "", ""],
output.lines)
# Get help for one specific command prefix.
output = registry.dispatch_command("help", ["noop"])
self.assertEqual(["noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing."], output.lines)
# Get help for a nonexistent command prefix.
output = registry.dispatch_command("help", ["foo"])
self.assertEqual(["Invalid command prefix: \"foo\""], output.lines)
def testHelpCommandWithIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
help_intro = debugger_cli_common.RichTextLines(
["Introductory comments.", ""])
registry.set_help_intro(help_intro)
output = registry.dispatch_command("help", [])
self.assertEqual(help_intro.lines + [
"help", " Aliases: h", "", " Print this help message.", "", "",
"noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing.", "", ""
], output.lines)
class RegexFindTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
def testRegexFindWithoutExistingFontAttrSegs(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow")], new_screen_output.font_attr_segs[0])
self.assertEqual([(8, 11, "yellow")], new_screen_output.font_attr_segs[1])
# Check field in annotations carrying a list of matching line indices.
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithExistingFontAttrSegs(self):
# Add a font attribute segment first.
self._orig_screen_output.font_attr_segs[0] = [(9, 12, "red")]
self.assertEqual(1, len(self._orig_screen_output.font_attr_segs))
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow"), (9, 12, "red")],
new_screen_output.font_attr_segs[0])
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithNoMatches(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"infrared", "yellow")
self.assertEqual({}, new_screen_output.font_attr_segs)
self.assertEqual([], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testInvalidRegex(self):
with self.assertRaisesRegexp(ValueError, "Invalid regular expression"):
debugger_cli_common.regex_find(self._orig_screen_output, "[", "yellow")
def testRegexFindOnPrependedLinesWorks(self):
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["Roses are red"])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "red", "bold")
self.assertEqual(
{0: [(10, 13, "bold")]}, searched_rich_lines.font_attr_segs)
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["A poem"], font_attr_segs=[(0, 1, "underline")])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "poem", "italic")
self.assertEqual(
{0: [(0, 1, "underline"), (2, 6, "italic")]},
searched_rich_lines.font_attr_segs)
class WrapScreenOutputTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 5, "red"), (6, 9, "gray"), (10, 12, "red"),
(12, 13, "crimson")],
2: [(0, 7, "blue"), (8, 11, "gray"), (12, 14, "blue"),
(14, 16, "indigo")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
def testNoActualWrapping(self):
# Large column limit should lead to no actual wrapping.
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 100)
self.assertEqual(self._orig_screen_output.lines, out.lines)
self.assertEqual(self._orig_screen_output.font_attr_segs,
out.font_attr_segs)
self.assertEqual(self._orig_screen_output.annotations, out.annotations)
self.assertEqual(new_line_indices, [0, 1, 2])
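    # new_line_indices[i] is the index, within the wrapped output, of the
    # first line produced from original line i.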
def testWrappingWithAttrCutoff(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 11)
# Add non-row-index field to out.
out.annotations["metadata"] = "foo"
# Check wrapped text.
self.assertEqual(5, len(out.lines))
self.assertEqual("Folk song:", out.lines[0])
self.assertEqual("Roses are r", out.lines[1])
self.assertEqual("ed", out.lines[2])
self.assertEqual("Violets are", out.lines[3])
self.assertEqual(" blue", out.lines[4])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertEqual([(0, 5, "red"), (6, 9, "gray"), (10, 11, "red")],
out.font_attr_segs[1])
self.assertEqual([(0, 1, "red"), (1, 2, "crimson")], out.font_attr_segs[2])
self.assertEqual([(0, 7, "blue"), (8, 11, "gray")], out.font_attr_segs[3])
self.assertEqual([(1, 3, "blue"), (3, 5, "indigo")], out.font_attr_segs[4])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[1])
self.assertFalse(2 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[3])
self.assertFalse(4 in out.annotations)
    # Check that the non-row-index field is present in the output.
self.assertEqual("foo", out.annotations["metadata"])
self.assertEqual(new_line_indices, [0, 1, 3])
def testWrappingWithMultipleAttrCutoff(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 12, "red")],
2: [(1, 16, "blue")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 5)
# Check wrapped text.
self.assertEqual(9, len(out.lines))
self.assertEqual("Folk ", out.lines[0])
self.assertEqual("song:", out.lines[1])
self.assertEqual("Roses", out.lines[2])
self.assertEqual(" are ", out.lines[3])
self.assertEqual("red", out.lines[4])
self.assertEqual("Viole", out.lines[5])
self.assertEqual("ts ar", out.lines[6])
self.assertEqual("e blu", out.lines[7])
self.assertEqual("e", out.lines[8])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertFalse(1 in out.font_attr_segs)
self.assertEqual([(0, 5, "red")], out.font_attr_segs[2])
self.assertEqual([(0, 5, "red")], out.font_attr_segs[3])
self.assertEqual([(0, 2, "red")], out.font_attr_segs[4])
self.assertEqual([(1, 5, "blue")], out.font_attr_segs[5])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[6])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[7])
self.assertEqual([(0, 1, "blue")], out.font_attr_segs[8])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertFalse(1 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[2])
self.assertFalse(3 in out.annotations)
self.assertFalse(4 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[5])
self.assertFalse(6 in out.annotations)
self.assertFalse(7 in out.annotations)
self.assertFalse(8 in out.annotations)
self.assertEqual(new_line_indices, [0, 2, 5])
def testWrappingInvalidArguments(self):
with self.assertRaisesRegexp(ValueError,
"Invalid type of input screen_output"):
debugger_cli_common.wrap_rich_text_lines("foo", 12)
with self.assertRaisesRegexp(ValueError, "Invalid type of input cols"):
debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines(["foo", "bar"]), "12")
def testWrappingEmptyInput(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines([]), 10)
self.assertEqual([], out.lines)
self.assertEqual([], new_line_indices)
class SliceRichTextLinesTest(test_util.TensorFlowTestCase):
def setUp(self):
self._original = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={
0: "longer wavelength",
1: "shorter wavelength",
"foo_metadata": "bar"
})
def testSliceBeginning(self):
sliced = self._original.slice(0, 1)
self.assertEqual(["Roses are red"], sliced.lines)
self.assertEqual({0: [(0, 5, "red")]}, sliced.font_attr_segs)
    # Non-line-number metadata should be preserved.
self.assertEqual({
0: "longer wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testSliceEnd(self):
sliced = self._original.slice(1, 2)
self.assertEqual(["Violets are blue"], sliced.lines)
# The line index should have changed from 1 to 0.
self.assertEqual({0: [(0, 7, "blue")]}, sliced.font_attr_segs)
self.assertEqual({
0: "shorter wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testAttemptSliceWithNegativeIndex(self):
with self.assertRaisesRegexp(ValueError, "Encountered negative index"):
self._original.slice(0, -1)
class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tc_reg = debugger_cli_common.TabCompletionRegistry()
# Register the items in an unsorted order deliberately, to test the sorted
# output from get_completions().
self._tc_reg.register_tab_comp_context(
["print_tensor", "pt"],
["node_b:1", "node_b:2", "node_a:1", "node_a:2"])
self._tc_reg.register_tab_comp_context(["node_info"],
["node_c", "node_b", "node_a"])
def testTabCompletion(self):
# The returned completions should have sorted order.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a:1", "node_a:2", "node_b:1", "node_b:2"],
"node_"), self._tc_reg.get_completions("pt", ""))
self.assertEqual((["node_a:1", "node_a:2"], "node_a:"),
self._tc_reg.get_completions("print_tensor", "node_a"))
self.assertEqual((["node_a:1"], "node_a:1"),
self._tc_reg.get_completions("pt", "node_a:1"))
self.assertEqual(([], ""),
self._tc_reg.get_completions("print_tensor", "node_a:3"))
self.assertEqual((None, None), self._tc_reg.get_completions("foo", "node_"))
def testExtendCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.extend_comp_items("print_tensor", ["node_A:1", "node_A:2"])
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
    # Extending the completions under one of a context's words should have
    # taken effect for the other words registered to the same context as well.
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testExtendCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.extend_comp_items("foo", ["node_A:1", "node_A:2"])
def testRemoveCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.remove_comp_items("pt", ["node_a:1", "node_a:2"])
self.assertEqual((["node_b:1", "node_b:2"], "node_b:"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testRemoveCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.remove_comp_items("foo", ["node_a:1", "node_a:2"])
def testDeregisterContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
self.assertEqual((None, None),
self._tc_reg.get_completions("print_tensor", "node_"))
# The alternative context word should be unaffected.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
def testDeregisterNonexistentContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
with self.assertRaisesRegexp(
KeyError,
"Cannot deregister unregistered context word \"print_tensor\""):
self._tc_reg.deregister_context(["print_tensor"])
class CommandHistoryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._history_file_path = tempfile.mktemp()
self._cmd_hist = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
def tearDown(self):
if os.path.isfile(self._history_file_path):
os.remove(self._history_file_path)
def _restoreFileReadWritePermissions(self, file_path):
os.chmod(file_path,
(stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR |
stat.S_IWGRP | stat.S_IWOTH))
def testLookUpMostRecent(self):
self.assertEqual([], self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("node_info node_b")
self.assertEqual(["node_info node_b"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(4))
# Go over the limit.
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(4))
def testLookUpPrefix(self):
self._cmd_hist.add_command("node_info node_b")
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.lookup_prefix("node_info", 10))
self.assertEqual(["node_info node_a"], self._cmd_hist.lookup_prefix(
"node_info", 1))
self.assertEqual([], self._cmd_hist.lookup_prefix("print_tensor", 10))
def testAddNonStrCommand(self):
with self.assertRaisesRegexp(
TypeError, "Attempt to enter non-str entry to command history"):
self._cmd_hist.add_command(["print_tensor node_a:0"])
def testRepeatingCommandsDoNotGetLoggedRepeatedly(self):
self._cmd_hist.add_command("help")
self._cmd_hist.add_command("help")
self.assertEqual(["help"], self._cmd_hist.most_recent_n(2))
def testCommandHistoryFileIsCreated(self):
self.assertFalse(os.path.isfile(self._history_file_path))
self._cmd_hist.add_command("help")
self.assertTrue(os.path.isfile(self._history_file_path))
with open(self._history_file_path, "rt") as f:
self.assertEqual(["help\n"], f.readlines())
def testLoadingCommandHistoryFileObeysLimit(self):
self._cmd_hist.add_command("help 1")
self._cmd_hist.add_command("help 2")
self._cmd_hist.add_command("help 3")
self._cmd_hist.add_command("help 4")
cmd_hist_2 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help 2", "help 3", "help 4"],
cmd_hist_2.most_recent_n(3))
with open(self._history_file_path, "rt") as f:
self.assertEqual(
["help 2\n", "help 3\n", "help 4\n"], f.readlines())
  def testCommandHistoryHandlesReadingIOErrorGraciously(self):
with open(self._history_file_path, "wt") as f:
f.write("help\n")
# Change file to not readable by anyone.
os.chmod(self._history_file_path, 0)
# The creation of a CommandHistory object should not error out.
debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self._restoreFileReadWritePermissions(self._history_file_path)
  def testCommandHistoryHandlesWritingIOErrorGraciously(self):
with open(self._history_file_path, "wt") as f:
f.write("help\n")
# Change file to read-only.
os.chmod(self._history_file_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Reading from the file should still work.
cmd_hist_2 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help"], cmd_hist_2.most_recent_n(1))
    # Writing should no longer work, but it should fail silently and
    # the command history kept within the instance should still work.
cmd_hist_2.add_command("foo")
self.assertEqual(["help", "foo"], cmd_hist_2.most_recent_n(2))
cmd_hist_3 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help"], cmd_hist_3.most_recent_n(1))
self._restoreFileReadWritePermissions(self._history_file_path)
class MenuNodeTest(test_util.TensorFlowTestCase):
def testCommandTypeConstructorSucceeds(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertEqual("water flower", menu_node.caption)
self.assertEqual("water_flower", menu_node.content)
def testDisableWorks(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertTrue(menu_node.is_enabled())
menu_node.disable()
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
def testConstructAsDisabledWorks(self):
menu_node = debugger_cli_common.MenuItem(
"water flower", "water_flower", enabled=False)
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
class MenuTest(test_util.TensorFlowTestCase):
def setUp(self):
self.menu = debugger_cli_common.Menu()
self.assertEqual(0, self.menu.num_items())
self.node1 = debugger_cli_common.MenuItem("water flower", "water_flower")
self.node2 = debugger_cli_common.MenuItem(
"measure wavelength", "measure_wavelength")
self.menu.append(self.node1)
self.menu.append(self.node2)
self.assertEqual(2, self.menu.num_items())
def testFormatAsSingleLineWithStrItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs="underline")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithListItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs=["underline", "bold"])
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline", "bold"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline", "bold"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithNoneItemAttrsWorks(self):
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testInsertNode(self):
self.assertEqual(["water flower", "measure wavelength"],
self.menu.captions())
node2 = debugger_cli_common.MenuItem("write poem", "write_poem")
self.menu.insert(1, node2)
self.assertEqual(["water flower", "write poem", "measure wavelength"],
self.menu.captions())
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, write poem, measure wavelength, "],
output.lines)
def testFormatAsSingleLineWithDisabledNode(self):
node2 = debugger_cli_common.MenuItem(
"write poem", "write_poem", enabled=False)
self.menu.append(node2)
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", disabled_item_attrs="bold")
self.assertEqual(["Menu: water flower, measure wavelength, write poem, "],
output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual((40, 50, ["bold"]), output.font_attr_segs[0][2])
if __name__ == "__main__":
googletest.main()
| apache-2.0 | 5,202,102,476,996,131,000 | -4,693,513,243,091,393,000 | 38.48914 | 80 | 0.635484 | false |
Frankenmint/p2pool | p2pool/test/util/test_forest.py | 283 | 6804 | import random
import unittest
from p2pool.util import forest, math
class DumbTracker(object):
def __init__(self, items=[]):
self.items = {} # hash -> item
self.reverse = {} # previous_hash -> set of item_hashes
for item in items:
self.add(item)
def add(self, item):
if item.hash in self.items:
raise ValueError('item already present')
self.items[item.hash] = item
self.reverse.setdefault(item.previous_hash, set()).add(item.hash)
def remove(self, item_hash):
item = self.items[item_hash]
del item_hash
self.items.pop(item.hash)
self.reverse[item.previous_hash].remove(item.hash)
if not self.reverse[item.previous_hash]:
self.reverse.pop(item.previous_hash)
@property
def heads(self):
return dict((x, self.get_last(x)) for x in self.items if x not in self.reverse)
@property
def tails(self):
return dict((x, set(y for y in self.items if self.get_last(y) == x and y not in self.reverse)) for x in self.reverse if x not in self.items)
def get_nth_parent_hash(self, item_hash, n):
for i in xrange(n):
item_hash = self.items[item_hash].previous_hash
return item_hash
def get_height(self, item_hash):
height, last = self.get_height_and_last(item_hash)
return height
def get_last(self, item_hash):
height, last = self.get_height_and_last(item_hash)
return last
def get_height_and_last(self, item_hash):
height = 0
while item_hash in self.items:
item_hash = self.items[item_hash].previous_hash
height += 1
return height, item_hash
def get_chain(self, start_hash, length):
# same implementation :/
assert length <= self.get_height(start_hash)
for i in xrange(length):
yield self.items[start_hash]
start_hash = self.items[start_hash].previous_hash
def is_child_of(self, item_hash, possible_child_hash):
if self.get_last(item_hash) != self.get_last(possible_child_hash):
return None
while True:
if possible_child_hash == item_hash:
return True
if possible_child_hash not in self.items:
return False
possible_child_hash = self.items[possible_child_hash].previous_hash
class FakeShare(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
self._attrs = kwargs
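# A tiny worked example of the reference tracker above (doctest-style sketch;
# the three hypothetical items form a single chain 0 <- 1 <- 2):
#
#   >>> t = DumbTracker(FakeShare(hash=i, previous_hash=i - 1 if i > 0 else None)
#   ...                 for i in xrange(3))
#   >>> t.heads
#   {2: None}
#   >>> t.tails
#   {None: set([2])}
#   >>> t.get_height_and_last(2)
#   (3, None)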
def test_tracker(self):
t = DumbTracker(self.items.itervalues())
assert self.items == t.items, (self.items, t.items)
assert self.reverse == t.reverse, (self.reverse, t.reverse)
assert self.heads == t.heads, (self.heads, t.heads)
assert self.tails == t.tails, (self.tails, t.tails)
if random.random() < 0.9:
return
for start in self.items:
a, b = self.get_height_and_last(start), t.get_height_and_last(start)
assert a == b, (a, b)
other = random.choice(self.items.keys())
assert self.is_child_of(start, other) == t.is_child_of(start, other)
assert self.is_child_of(other, start) == t.is_child_of(other, start)
length = random.randrange(a[0])
assert list(self.get_chain(start, length)) == list(t.get_chain(start, length))
def generate_tracker_simple(n):
t = forest.Tracker(math.shuffled(FakeShare(hash=i, previous_hash=i - 1 if i > 0 else None) for i in xrange(n)))
test_tracker(t)
return t
def generate_tracker_random(n):
items = []
for i in xrange(n):
x = random.choice(items + [FakeShare(hash=None), FakeShare(hash=random.randrange(1000000, 2000000))]).hash
items.append(FakeShare(hash=i, previous_hash=x))
t = forest.Tracker(math.shuffled(items))
test_tracker(t)
return t
class Test(unittest.TestCase):
def test_tracker(self):
t = generate_tracker_simple(100)
assert t.heads == {99: None}
assert t.tails == {None: set([99])}
assert t.get_nth_parent_hash(90, 50) == 90 - 50
assert t.get_nth_parent_hash(91, 42) == 91 - 42
def test_get_nth_parent_hash(self):
t = generate_tracker_simple(200)
for i in xrange(1000):
a = random.randrange(200)
b = random.randrange(a + 1)
res = t.get_nth_parent_hash(a, b)
assert res == a - b, (a, b, res)
def test_tracker2(self):
for ii in xrange(20):
t = generate_tracker_random(random.randrange(100))
#print "--start--"
while t.items:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass # print "aborted", x
else:
break
test_tracker(t)
def test_tracker3(self):
for ii in xrange(10):
items = []
for i in xrange(random.randrange(100)):
x = random.choice(items + [FakeShare(hash=None), FakeShare(hash=random.randrange(1000000, 2000000))]).hash
items.append(FakeShare(hash=i, previous_hash=x))
t = forest.Tracker()
test_tracker(t)
for item in math.shuffled(items):
t.add(item)
test_tracker(t)
if random.randrange(3) == 0:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass
else:
break
test_tracker(t)
for item in math.shuffled(items):
if item.hash not in t.items:
t.add(item)
test_tracker(t)
if random.randrange(3) == 0:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass
else:
break
test_tracker(t)
while t.items:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass
else:
break
test_tracker(t)
| gpl-3.0 | 1,900,910,112,477,709,800 | -4,605,529,985,937,470,500 | 34.072165 | 148 | 0.521752 | false |
40223143/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/copyreg.py | 749 | 6611 | """Helper to provide extensibility for pickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
    # The constructor_ob argument is a vestige of the old __safe_for_unpickling__
    # mechanism. There is no reason for the caller to pass it anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is self.__class__:
raise TypeError("can't pickle %s objects" % base.__name__)
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
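# Roughly, for a plain new-style instance the reduction above yields (sketch
# with a hypothetical Point class, reprs abbreviated):
#
#   >>> class Point: pass
#   >>> p = Point(); p.x = 1
#   >>> _reduce_ex(p, 1)
#   (_reconstructor, (Point, object, None), {'x': 1})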
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, str):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
names.append('_%s%s' % (c.__name__, name))
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
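# Illustrative only (hypothetical classes): slots are gathered along the MRO
# and private names come back in their mangled form.
#
#   >>> class A: __slots__ = ('x', '__y')
#   >>> class B(A): __slots__ = ('z',)
#   >>> _slotnames(B)
#   ['z', 'x', '_A__y']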
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError("code out of range")
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
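# Sketch of how the registry is used (240 is only a placeholder taken from the
# private-use range, not an assigned code):
#
#   >>> add_extension('collections', 'OrderedDict', 240)
#   >>> _extension_registry[('collections', 'OrderedDict')]
#   240
#   >>> remove_extension('collections', 'OrderedDict', 240)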
| gpl-3.0 | -5,497,218,380,683,354,000 | -4,316,910,546,430,299,000 | 32.729592 | 75 | 0.602783 | false |
kchodorow/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn.py | 9 | 31036 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
"""Deep Neural Net model_fn.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `_Head` instance.
* hidden_units: List of hidden units per layer.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use the Adagrad
optimizer with a default learning rate of 0.05.
* activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
* dropout: When not `None`, the probability we will drop out a given
coordinate.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* embedding_lr_multipliers: Optional. A dictionary from
`EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
multiply with learning rate for the embedding variables.
* input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
config: `RunConfig` object to configure the runtime settings.
Returns:
predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
head = params["head"]
hidden_units = params["hidden_units"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or "Adagrad"
activation_fn = params.get("activation_fn")
dropout = params.get("dropout")
gradient_clip_norm = params.get("gradient_clip_norm")
input_layer_min_slice_size = (
params.get("input_layer_min_slice_size") or 64 << 20)
num_ps_replicas = config.num_ps_replicas if config else 0
embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
features = _get_feature_dict(features)
parent_scope = "dnn"
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
parent_scope,
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=input_layer_min_slice_size))
with variable_scope.variable_scope(
"input_from_feature_columns",
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner) as input_layer_scope:
net = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
weight_collections=[parent_scope],
scope=input_layer_scope)
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
"hiddenlayer_%d" % layer_id,
values=(net,)) as hidden_layer_scope:
net = layers.fully_connected(
net,
num_hidden_units,
activation_fn=activation_fn,
variables_collections=[parent_scope],
scope=hidden_layer_scope)
if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = layers.dropout(net, keep_prob=(1.0 - dropout))
_add_hidden_layer_summary(net, hidden_layer_scope.name)
with variable_scope.variable_scope(
"logits",
values=(net,)) as logits_scope:
logits = layers.fully_connected(
net,
head.logits_dimension,
activation_fn=None,
variables_collections=[parent_scope],
scope=logits_scope)
_add_hidden_layer_summary(logits, logits_scope.name)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizers.optimize_loss(
loss=loss,
global_step=contrib_variables.get_global_step(),
learning_rate=_LEARNING_RATE,
optimizer=_get_optimizer(optimizer),
gradient_multipliers=(
dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access
embedding_lr_multipliers, parent_scope,
input_layer_scope.name)),
clip_gradients=gradient_clip_norm,
name=parent_scope,
# Empty summaries to prevent optimizers from logging training_loss.
summaries=[])
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
class DNNClassifier(estimator.Estimator):
"""A classifier for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train: # returns x, y (where y represents label's class index).
pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, y (where y represents label's class index).
pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
embedding_lr_multipliers=None,
input_layer_min_slice_size=None):
"""Initializes a DNNClassifier instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
It must be greater than 1. Note: Class labels are integers representing
the class index (i.e. values from 0 to n_classes-1). For arbitrary
label values (e.g. string labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
Returns:
A `DNNClassifier` estimator.
Raises:
ValueError: If `n_classes` < 2.
"""
self._hidden_units = hidden_units
self._feature_columns = tuple(feature_columns or [])
self._enable_centered_bias = enable_centered_bias
super(DNNClassifier, self).__init__(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib._multi_class_head( # pylint: disable=protected-access
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units": hidden_units,
"feature_columns": self._feature_columns,
"optimizer": optimizer,
"activation_fn": activation_fn,
"dropout": dropout,
"gradient_clip_norm": gradient_clip_norm,
"embedding_lr_multipliers": embedding_lr_multipliers,
"input_layer_min_slice_size": input_layer_min_slice_size,
},
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_classes, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted classes. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_classes` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns classes.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_classes(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(DNNClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_classes(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = super(DNNClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key].reshape(-1)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self,
x=None,
input_fn=None,
batch_size=None,
as_iterable=True):
"""Returns predicted probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities with shape [batch_size, n_classes]
(or an iterable of predicted probabilities if as_iterable is True).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = super(DNNClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(examples,
self._feature_columns)
return super(DNNClassifier, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def weights_(self):
hiddenlayer_weights = [
self.get_variable_value("dnn/hiddenlayer_%d/weights" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_weights = [self.get_variable_value("dnn/logits/weights")]
return hiddenlayer_weights + logits_weights
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
hiddenlayer_bias = [
self.get_variable_value("dnn/hiddenlayer_%d/biases" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_bias = [self.get_variable_value("dnn/logits/biases")]
if self._enable_centered_bias:
centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)]
else:
centered_bias = []
return hiddenlayer_bias + logits_bias + centered_bias
class DNNRegressor(estimator.Estimator):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train: # returns x, y
pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval: # returns x, y
pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
label_dimension=1,
embedding_lr_multipliers=None,
input_layer_min_slice_size=None):
"""Initializes a `DNNRegressor` instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
Returns:
A `DNNRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
super(DNNRegressor, self).__init__(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units": hidden_units,
"feature_columns": self._feature_columns,
"optimizer": optimizer,
"activation_fn": activation_fn,
"dropout": dropout,
"gradient_clip_norm": gradient_clip_norm,
"embedding_lr_multipliers": embedding_lr_multipliers,
"input_layer_min_slice_size": input_layer_min_slice_size,
},
feature_engineering_fn=feature_engineering_fn)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None):
"""See evaluable.Evaluable."""
# TODO(zakaria): remove once deprecation is finished (b/31229024)
custom_metrics = {}
if metrics:
for key, metric in six.iteritems(metrics):
if (not isinstance(metric, metric_spec.MetricSpec) and
not isinstance(key, tuple)):
custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric
else:
custom_metrics[key] = metric
return super(DNNRegressor, self).evaluate(
x=x,
y=y,
input_fn=input_fn,
feed_fn=feed_fn,
batch_size=batch_size,
steps=steps,
metrics=custom_metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_scores, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(examples,
self._feature_columns)
return super(DNNRegressor, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=signature_fn or export.regression_signature_fn,
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
| apache-2.0 | -3,325,630,181,535,137,300 | 1,102,605,837,321,408,500 | 39.676278 | 104 | 0.646475 | false |
cmelange/ansible | lib/ansible/modules/cloud/webfaction/webfaction_site.py | 6 | 7096 | #!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
required: false
choices:
- true
- false
default: 'false'
site_apps:
description:
- A mapping of URLs to apps
required: false
subdomains:
description:
- A list of subdomains associated with this site.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
host = dict(required=True),
https = dict(required=False, type='bool', default=False),
subdomains = dict(required=False, type='list', default=[]),
site_apps = dict(required=False, type='list', default=[]),
login_name = dict(required=True),
login_password = dict(required=True, no_log=True),
),
supports_check_mode=True
)
site_name = module.params['name']
site_state = module.params['state']
site_host = module.params['host']
site_ip = socket.gethostbyname(site_host)
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
site_list = webfaction.list_websites(session_id)
site_map = dict([(i['name'], i) for i in site_list])
existing_site = site_map.get(site_name)
result = {}
# Here's where the real stuff happens
if site_state == 'present':
# Does a site with this name already exist?
if existing_site:
# If yes, but it's on a different IP address, then fail.
# If we wanted to allow relocation, we could add a 'relocate=true' option
# which would get the existing IP address, delete the site there, and create it
# at the new address. A bit dangerous, perhaps, so for now we'll require manual
# deletion if it's on another host.
if existing_site['ip'] != site_ip:
module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
# If it's on this host and the key parameters are the same, nothing needs to be done.
if (existing_site['https'] == module.boolean(module.params['https'])) and \
(set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
(dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
module.exit_json(
changed = False
)
positional_args = [
session_id, site_name, site_ip,
module.boolean(module.params['https']),
module.params['subdomains'],
]
for a in module.params['site_apps']:
positional_args.append( (a[0], a[1]) )
if not module.check_mode:
# If this isn't a dry run, create or modify the site
result.update(
webfaction.create_website(
*positional_args
) if not existing_site else webfaction.update_website (
*positional_args
)
)
elif site_state == 'absent':
# If the site's already not there, nothing changed.
if not existing_site:
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, delete the site
result.update(
webfaction.delete_website(session_id, site_name, site_ip)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(site_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 5,313,874,628,795,848,000 | 2,699,538,553,447,699,000 | 31.851852 | 353 | 0.601043 | false |
d2emon/generator-pack | src/genesys/generator/generator/name.py | 1 | 2378 | import random
from fixtures import generator_data
GENDER_NEUTRAL = 0
GENDER_MALE = 1
GENDER_FEMALE = 2
def random_generator(selector, generator_id=None, max_value=10):
if generator_id is None:
generator_id = random.randrange(max_value)
return selector(generator_id)
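# Illustrative call (hypothetical selector): a random id below max_value is
# drawn and handed to the selector, e.g.
#   random_generator(lambda i: "variant-%d" % i, max_value=3)  # -> 'variant-0'..'variant-2'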
class Name:
glue = ""
def __init__(self, *items, generator=None):
self._items = items
self.generator = generator
@classmethod
def build_name(cls, items):
return cls.glue.join(items).title()
@property
def value(self):
return self.build_name(self._items)
@value.setter
    def value(self, value):
        self._items = (value,)
def __str__(self):
return self.value
def __repr__(self):
return str(self)
class TextGenerator:
block_id = ''
groups = ()
data = []
def __init__(self, data=None):
data = data or generator_data
self.data = data[self.block_id]
def generate_data(self, group_id=None):
if len(self.groups) < 1:
return self.data
if group_id is None:
group_id = random.choice(self.groups)
return self.data.get(group_id)
def generate(self, group_id=None, item_id=None, *args, **kwargs):
return Name(random.choice(self.generate_data(group_id)), generator=self)
def __iter__(self):
return self
def __next__(self):
return self.generate()
class NameGenerator(TextGenerator):
block_id = 'names'
name_group_id = 'aliens'
gender = GENDER_NEUTRAL
def __init__(self, data=None):
super().__init__(data)
self.data = self.data[self.name_group_id]
def generate(self, race_id=None, item_id=None, *args, **kwargs):
parts = (random.choice(part) for part in self.generate_data(race_id))
return Name(*parts, generator=self)
class ListNameGenerator(NameGenerator):
names = []
@classmethod
def select_names(cls, *args, **kwargs):
return cls.names
@classmethod
def generate(cls, gender=GENDER_NEUTRAL, *args, **kwargs):
        return Name(random.choice(cls.select_names(gender=gender, *args, **kwargs)), generator=cls)
class GenderListNameGenerator(ListNameGenerator):
names = dict()
@classmethod
def select_names(cls, gender=GENDER_NEUTRAL, *args, **kwargs):
return cls.names[gender]
| gpl-3.0 | -9,207,126,260,684,133,000 | -7,380,854,104,180,485,000 | 22.78 | 89 | 0.619849 | false |
Avsecz/concise | concise/legacy/models.py | 2 | 4949 | """Template for models
"""
from keras.models import Model
from keras.optimizers import Adam
import keras.layers as kl
import keras.initializers as ki
import keras.regularizers as kr
# concise modules
from concise import initializers as ci
from concise import layers as cl
from concise.utils import PWM
# ### 'First' Concise architecture from Tensorflow
# Splines:
# - `spline_score = X_spline %*% spline_weights`
# - Transform:
# - `exp(spline_score)`
# - `spline_score + 1`
# Linear features:
# - `lm_feat = X_feat %*% feature_weights`
# Model:
# - conv2d, `padding = "valid", w = motif_base_weights`
# - activation: exp or relu, bias = motif_bias
# - elementwise_multiply: `hidden * spline_score`
# - pooling: max, sum or mean (accross the whole model)
# - Optionally: multiply by non-linear scaling factor (model fitting)
# - `pool_layer %*% motif_weights + X_feat %*% feature_weights + final_bias`
# - loss: mse
# - optimizer: Adam, optionally l-BFGS
# Regularization:
# - motif_base_weights, L1: motif_lamb
# - motif_weights, L1: lambd
# - spline_weights:
# - `diag(t(spline_weights) %*% S %*% spline_weights)`, L2_mean: spline_lamb
# - spline_weights, L2 / n_spline_tracks: spline_param_lamb
# convolution model
def single_layer_pos_effect(pooling_layer="sum", # 'sum', 'max' or 'mean'
nonlinearity="relu", # 'relu' or 'exp'
motif_length=9,
n_motifs=6, # number of filters
step_size=0.01,
num_tasks=1, # multi-task learning - 'trans'
n_covariates=0,
seq_length=100, # pre-defined sequence length
# splines
n_splines=None,
share_splines=False, # should the positional bias be shared across motifs
# regularization
lamb=1e-5, # overall motif coefficient regularization
motif_lamb=1e-5,
spline_lamb=1e-5,
spline_param_lamb=1e-5,
# initialization
                            init_motifs=None, # motifs to initialize
init_motif_bias=0,
init_sd_motif=1e-2,
init_sd_w=1e-3, # initial weight scale of feature w or motif w
**kwargs): # unused params
# initialize conv kernels to known motif pwm's
if init_motifs:
# WARNING - initialization is not the same as for Concise class
pwm_list = [PWM.from_consensus(motif) for motif in init_motifs]
kernel_initializer = ci.PWMKernelInitializer(pwm_list, stddev=init_sd_motif)
bias_initializer = ci.PWMBiasInitializer(pwm_list, kernel_size=motif_length)
else:
# kernel_initializer = "glorot_uniform"
kernel_initializer = ki.RandomNormal(stddev=init_sd_motif)
bias_initializer = ki.Constant(value=init_motif_bias)
activation = nonlinearity # supports 'relu' out-of-the-box
# define the model
# ----------------
inputs = []
seq_input = kl.Input((seq_length, 4))
inputs.append(seq_input)
# convolution
xseq = kl.Conv1D(filters=n_motifs, kernel_size=motif_length,
kernel_regularizer=kr.l1(l=motif_lamb), # Regularization
activation=activation,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer
)(seq_input)
# optional positional effect
if n_splines:
xseq = cl.GAMSmooth(n_bases=n_splines,
share_splines=share_splines,
l2_smooth=spline_lamb,
l2=spline_param_lamb,
)(xseq)
# pooling layer
    if pooling_layer == "max":
xseq = kl.pooling.GlobalMaxPooling1D()(xseq)
    elif pooling_layer == "mean":
xseq = kl.pooling.GlobalAveragePooling1D()(xseq)
    elif pooling_layer == "sum":
xseq = cl.GlobalSumPooling1D()(xseq)
else:
raise ValueError("pooling_layer can only be 'sum', 'mean' or 'max'.")
# -----
# add covariates
if n_covariates:
cov_input = kl.Input((n_covariates, ))
inputs.append(cov_input)
x = kl.concatenate([xseq, cov_input])
else:
x = xseq
# -----
predictions = kl.Dense(units=num_tasks,
kernel_regularizer=kr.l1(lamb),
kernel_initializer=ki.RandomNormal(stddev=init_sd_w)
)(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=Adam(lr=step_size), loss="mse", metrics=["mse"])
return model
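# Minimal usage sketch (added for illustration only; the parameter values and the random
# training data below are assumptions, not an official Concise example):
if __name__ == "__main__":
    import numpy as np
    # build a small model: 6 motif filters of length 9 over 100 bp one-hot sequences,
    # with 10 spline bases modelling the positional effect
    example_model = single_layer_pos_effect(pooling_layer="sum", nonlinearity="relu",
                                            motif_length=9, n_motifs=6,
                                            seq_length=100, n_splines=10)
    X_seq = np.random.randint(0, 2, size=(32, 100, 4)).astype("float32")  # dummy one-hot-like sequences
    y = np.random.randn(32, 1)  # dummy regression targets
    example_model.fit(X_seq, y, epochs=1, verbose=0)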
| mit | -4,183,290,142,632,403,000 | 3,497,269,871,745,166,000 | 37.664063 | 102 | 0.55486 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/google/protobuf/internal/service_reflection_test.py | 75 | 5170 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.internal.service_reflection."""
__author__ = '[email protected] (Petar Petrov)'
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import service_reflection
from google.protobuf import service
class FooUnitTest(unittest.TestCase):
def testService(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request, response, callback):
self.method = method
self.controller = controller
self.request = request
callback(response)
class MockRpcController(service.RpcController):
def SetFailed(self, msg):
self.failure_message = msg
self.callback_response = None
class MyService(unittest_pb2.TestService):
pass
self.callback_response = None
def MyCallback(response):
self.callback_response = response
rpc_controller = MockRpcController()
channel = MockRpcChannel()
srvc = MyService()
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual('Method Foo not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
rpc_controller.failure_message = None
service_descriptor = unittest_pb2.TestService.GetDescriptor()
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual('Method Bar not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
class MyServiceImpl(unittest_pb2.TestService):
def Foo(self, rpc_controller, request, done):
self.foo_called = True
def Bar(self, rpc_controller, request, done):
self.bar_called = True
srvc = MyServiceImpl()
rpc_controller.failure_message = None
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.foo_called)
rpc_controller.failure_message = None
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.bar_called)
def testServiceStub(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request,
response_class, callback):
self.method = method
self.controller = controller
self.request = request
callback(response_class())
self.callback_response = None
def MyCallback(response):
self.callback_response = response
channel = MockRpcChannel()
stub = unittest_pb2.TestService_Stub(channel)
rpc_controller = 'controller'
request = 'request'
# GetDescriptor now static, still works as instance method for compatibility
self.assertEqual(unittest_pb2.TestService_Stub.GetDescriptor(),
stub.GetDescriptor())
# Invoke method.
stub.Foo(rpc_controller, request, MyCallback)
self.assertIsInstance(self.callback_response, unittest_pb2.FooResponse)
self.assertEqual(request, channel.request)
self.assertEqual(rpc_controller, channel.controller)
self.assertEqual(stub.GetDescriptor().methods[0], channel.method)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | -2,487,100,909,059,620,400 | 3,376,519,691,975,508,000 | 35.928571 | 80 | 0.724178 | false |
blueburningcoder/nupic | ci/travis/deploy-wheel-to-s3.py | 34 | 1809 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import boto
from boto.s3.key import Key
# This script assumes the following environment variables are set for boto:
# - AWS_ACCESS_KEY_ID
# - AWS_SECRET_ACCESS_KEY
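# An illustrative invocation (the credential values and wheel filename below are
# placeholders, not part of the original script):
#   export AWS_ACCESS_KEY_ID=...
#   export AWS_SECRET_ACCESS_KEY=...
#   python deploy-wheel-to-s3.py dist/nupic-0.3.0-py2-none-any.whl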
REGION = "us-west-2"
BUCKET = "artifacts.numenta.org"
RELEASE_FOLDER = "numenta/nupic/releases"
def upload(artifactsBucket, wheelFileName, wheelPath):
key = Key(artifactsBucket)
key.key = "%s/%s" % (RELEASE_FOLDER, wheelFileName)
print "Uploading %s to %s/%s..." % (wheelFileName, BUCKET, RELEASE_FOLDER)
key.set_contents_from_filename(wheelPath)
def run(wheelPath):
wheelFileName = os.path.basename(wheelPath)
conn = boto.connect_s3()
artifactsBucket = conn.get_bucket(BUCKET)
upload(artifactsBucket, wheelFileName, wheelPath)
if __name__ == "__main__":
wheelPath = sys.argv[1]
run(wheelPath)
| agpl-3.0 | 779,394,986,865,380,400 | 127,298,102,995,627,660 | 31.303571 | 76 | 0.68325 | false |
tquizzle/Sick-Beard | sickbeard/clients/requests/utils.py | 204 | 17497 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import os
import platform
import re
import sys
import zlib
from netrc import netrc, NetrcParseError
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import quote, urlparse, bytes, str, OrderedDict, urlunparse
from .cookies import RequestsCookieJar, cookiejar_from_dict
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
return os.fstat(o.fileno()).st_size
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES)
netrc_path = None
for loc in locations:
if os.path.exists(loc) and not netrc_path:
netrc_path = loc
# Abort early if there isn't one.
if netrc_path is None:
return netrc_path
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, dict):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
return charset_re.findall(content)
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode('', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
    2. every encoding from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
def stream_decompress(iterator, mode='gzip'):
"""Stream decodes an iterator over compressed data
:param iterator: An iterator over compressed data
:param mode: 'gzip' or 'deflate'
:return: An iterator over decompressed data
"""
if mode not in ['gzip', 'deflate']:
raise ValueError('stream_decompress mode must be gzip or deflate')
zlib_mode = 16 + zlib.MAX_WBITS if mode == 'gzip' else -zlib.MAX_WBITS
dec = zlib.decompressobj(zlib_mode)
try:
for chunk in iterator:
rv = dec.decompress(chunk)
if rv:
yield rv
except zlib.error:
# If there was an error decompressing, just return the raw chunk
yield chunk
# Continue to return the rest of the raw data
for chunk in iterator:
yield chunk
else:
# Make sure everything has been returned from the decompression object
buf = dec.decompress(bytes())
rv = buf + dec.flush()
if rv:
yield rv
def stream_untransfer(gen, resp):
ce = resp.headers.get('content-encoding', '').lower()
if 'gzip' in ce:
gen = stream_decompress(gen, mode='gzip')
elif 'deflate' in ce:
gen = stream_decompress(gen, mode='deflate')
return gen
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
c = chr(int(h, 16))
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
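# Example (illustrative): requote_uri('http://example.com/a%20b c') returns
# 'http://example.com/a%20b%20c' -- the existing escape is preserved and the raw
# space is quoted, so already-quoted URIs are not double-escaped.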
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
proxy_keys = [
'all',
'http',
'https',
'ftp',
'socks'
]
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.split(',')
netloc = urlparse(url).netloc
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return {}
# If we get here, we either didn't have no_proxy set or we're not going
# anywhere that no_proxy applies to.
proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys]
return dict([(key, val) for (key, val) in proxies if val])
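# Example (illustrative): with http_proxy="http://proxy:3128" and no_proxy="localhost"
# set in the environment, get_environ_proxies("http://example.com/") returns
# {'http': 'http://proxy:3128'}, while get_environ_proxies("http://localhost:8080/")
# returns {} because the host matches the no_proxy list.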
def default_user_agent():
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['python-requests/%s' % __version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return {
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')),
'Accept': '*/*'
}
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
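# Example (illustrative): guess_json_utf(b'{"k": 1}') returns 'utf-8' (no null bytes),
# while guess_json_utf('{"k": 1}'.encode('utf-16')) returns 'utf-16' because the codec
# prepends a BOM that the sample check recognises.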
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
if url:
parsed = urlparse(url)
return (parsed.username, parsed.password)
else:
return ('', '')
| gpl-3.0 | -6,801,895,894,382,924,000 | 4,124,751,646,315,493,400 | 28.605753 | 118 | 0.605018 | false |
skuicloud/chef-openstack-cookbooks | cookbooks/openstack-image/files/default/glance_plugin.py | 10 | 4858 | from glance.client import V1Client
from glance.common import exception
import collectd
global NAME, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL, OS_AUTH_STRATEGY, VERBOSE_LOGGING
NAME = "glance_plugin"
OS_USERNAME = "username"
OS_PASSWORD = "password"
OS_TENANT_NAME = "tenantname"
OS_AUTH_URL = "http://localhost:5000/v2.0"
OS_AUTH_STRATEGY = "keystone"
VERBOSE_LOGGING = False
def get_stats(user, passwd, tenant, url, host=None):
creds = {"username": user, "password": passwd, "tenant": tenant,"auth_url": url, "strategy": OS_AUTH_STRATEGY}
client = V1Client(host,creds=creds)
try:
image_list = client.get_images_detailed()
except exception.NotAuthenticated:
msg = "Client credentials appear to be invalid"
raise exception.ClientConnectionError(msg)
else:
# TODO(shep): this needs to be rewritten more inline with the keystone|nova plugins
data = dict()
data["count"] = int(len(image_list))
data["bytes"] = 0
data["snapshot.count"] = 0
data["snapshot.bytes"] = 0
data["tenant"] = dict()
for image in image_list:
data["bytes"] += int(image["size"])
if "image_type" in image["properties"] and image["properties"]["image_type"] == "snapshot":
data["snapshot.count"] += 1
data["snapshot.bytes"] += int(image["size"])
uuid = str(image["owner"])
if uuid in data["tenant"]:
data["tenant"][uuid]["count"] += 1
data["tenant"][uuid]["bytes"] += int(image["size"])
if "image_type" in image["properties"] and image["properties"]["image_type"] == "snapshot":
data["tenant"][uuid]["snapshot.count"] += 1
data["tenant"][uuid]["snapshot.bytes"] += int(image["size"])
else:
data["tenant"][uuid] = dict()
data["tenant"][uuid]["count"] = 1
data["tenant"][uuid]["bytes"] = int(image["size"])
data["tenant"][uuid]["snapshot.count"] = 0
data["tenant"][uuid]["snapshot.bytes"] = 0
if "image_type" in image["properties"] and image["properties"]["image_type"] == "snapshot":
data["tenant"][uuid]["snapshot.count"] += 1
data["tenant"][uuid]["snapshot.bytes"] += int(image["size"])
# debug
#for key in data.keys():
# if key == "tenant":
# for uuid in data[key].keys():
# for field in data[key][uuid]:
# print "glance.images.tenant.%s.%s : %i" % (uuid, field, data[key][uuid][field])
# else:
# print "glance.images.%s : %i" % (key, data[key])
##########
return data
def configure_callback(conf):
"""Received configuration information"""
    global OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL, VERBOSE_LOGGING
for node in conf.children:
if node.key == "Username":
OS_USERNAME = node.values[0]
elif node.key == "Password":
OS_PASSWORD = node.values[0]
elif node.key == "TenantName":
OS_TENANT_NAME = node.values[0]
elif node.key == "AuthURL":
OS_AUTH_URL = node.values[0]
elif node.key == "Verbose":
VERBOSE_LOGGING = node.values[0]
else:
logger("warn", "Unknown config key: %s" % node.key)
def read_callback():
logger("verb", "read_callback")
info = get_stats(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL)
if not info:
logger("err", "No information received")
return
for key in info.keys():
if key == "tenant":
for uuid in info[key].keys():
for field in info[key][uuid]:
logger('verb', 'Dispatching glance.images.tenant.%s.%s : %i' % (uuid, field, int(info[key][uuid][field])))
path = 'glance.images.%s.%s' % (uuid, field)
val = collectd.Values(plugin=path)
val.type = 'gauge'
val.values = [int(info[key][uuid][field])]
val.dispatch()
else:
logger('verb', 'Dispatching %s : %i' % (key, int(info[key])))
path = 'glance.images.%s' % (key)
val = collectd.Values(plugin=path)
val.type = 'gauge'
val.values = [int(info[key])]
val.dispatch()
def logger(t, msg):
if t == 'err':
collectd.error('%s: %s' % (NAME, msg))
if t == 'warn':
collectd.warning('%s: %s' % (NAME, msg))
elif t == 'verb' and VERBOSE_LOGGING == True:
collectd.info('%s: %s' % (NAME, msg))
collectd.register_config(configure_callback)
collectd.warning("Initializing glance plugin")
collectd.register_read(read_callback)
| apache-2.0 | 3,239,959,831,362,195,000 | 8,808,456,998,115,502,000 | 40.521368 | 126 | 0.543228 | false |
40223243/40223243w17 | static/Brython3.1.1-20150328-091302/Lib/_dummy_thread.py | 742 | 4769 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
| gpl-3.0 | -1,909,916,534,498,648,600 | -1,117,517,331,198,684,400 | 30.582781 | 79 | 0.648144 | false |
BeATz-UnKNoWN/python-for-android | python-modules/twisted/twisted/mail/topfiles/setup.py | 54 | 1918 | # Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
try:
from twisted.python import dist
except ImportError:
raise SystemExit("twisted.python.dist module not found. Make sure you "
"have installed the Twisted core package before "
"attempting to install any other Twisted projects.")
if __name__ == '__main__':
if sys.version_info[:2] >= (2, 4):
extraMeta = dict(
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Communications :: Email :: Post-Office :: IMAP",
"Topic :: Communications :: Email :: Post-Office :: POP3",
"Topic :: Software Development :: Libraries :: Python Modules",
])
else:
extraMeta = {}
dist.setup(
twisted_subproject="mail",
scripts=dist.getScripts("mail"),
# metadata
name="Twisted Mail",
description="A Twisted Mail library, server and client.",
author="Twisted Matrix Laboratories",
author_email="[email protected]",
maintainer="Jp Calderone",
url="http://twistedmatrix.com/trac/wiki/TwistedMail",
license="MIT",
long_description="""\
An SMTP, IMAP and POP protocol implementation together with clients
and servers.
Twisted Mail contains high-level, efficient protocol implementations
for both clients and servers of SMTP, POP3, and IMAP4. Additionally,
it contains an "out of the box" combination SMTP/POP3 virtual-hosting
mail server. Also included is a read/write Maildir implementation and
a basic Mail Exchange calculator.
""",
**extraMeta)
| apache-2.0 | 515,105,781,269,126,340 | 939,129,829,628,386,000 | 37.36 | 79 | 0.618874 | false |
dingocuster/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause | 3,805,183,152,449,051,000 | 1,435,446,357,430,458,600 | 38.225806 | 79 | 0.617599 | false |
vitaly-krugl/nupic | src/nupic/data/generators/data_generator.py | 10 | 16250 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import random as rand
from nupic.encoders import adaptive_scalar, sdr_category, date
from nupic.bindings.math import GetNTAReal
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.generators.distributions import *
realDType = GetNTAReal()
class DataGenerator():
"""The DataGenerator provides a framework for generating, encoding, saving
and exporting records. Each column of the output contains records with a
specific set of parameters such as encoderType, n, w, etc. This interface
is intended to be used for testing the spatial pooler, temporal memory and
for generating artificial datasets.
"""
def __init__(self, name='testDataset', seed=42, verbosity=0):
"""Initialize the dataset generator with a random seed and a name"""
self.name=name
self.verbosity=verbosity
self.setSeed(seed)
self.fields=[]
def getDescription(self):
"""Returns a description of the dataset"""
description = {'name':self.name, 'fields':[f.name for f in self.fields], \
'numRecords by field':[f.numRecords for f in self.fields]}
return description
def setSeed(self, seed):
"""Set the random seed and the numpy seed
Parameters:
--------------------------------------------------------------------
seed: random seed
"""
rand.seed(seed)
np.random.seed(seed)
def addField(self, name, fieldParams, encoderParams):
"""Add a single field to the dataset.
Parameters:
-------------------------------------------------------------------
name: The user-specified name of the field
fieldSpec: A list of one or more dictionaries specifying parameters
to be used for dataClass initialization. Each dict must
contain the key 'type' that specifies a distribution for
the values in this field
encoderParams: Parameters for the field encoder
"""
    assert fieldParams is not None and 'type' in fieldParams
dataClassName = fieldParams.pop('type')
try:
dataClass=eval(dataClassName)(fieldParams)
except TypeError, e:
      print ("#### Error in constructing %s class object. Possibly missing "
             "some required constructor parameters. Parameters "
             "that were provided are: %s" % (dataClassName, fieldParams))
raise
encoderParams['dataClass']=dataClass
encoderParams['dataClassName']=dataClassName
fieldIndex = self.defineField(name, encoderParams)
def addMultipleFields(self, fieldsInfo):
"""Add multiple fields to the dataset.
Parameters:
-------------------------------------------------------------------
fieldsInfo: A list of dictionaries, containing a field name, specs for
the data classes and encoder params for the corresponding
field.
"""
assert all(x in field for x in ['name', 'fieldSpec', 'encoderParams'] for field \
in fieldsInfo)
for spec in fieldsInfo:
self.addField(spec.pop('name'), spec.pop('fieldSpec'), spec.pop('encoderParams'))
def defineField(self, name, encoderParams=None):
"""Initialize field using relevant encoder parameters.
Parameters:
-------------------------------------------------------------------
name: Field name
encoderParams: Parameters for the encoder.
Returns the index of the field
"""
self.fields.append(_field(name, encoderParams))
return len(self.fields)-1
def setFlag(self, index, flag):
"""Set flag for field at index. Flags are special characters such as 'S' for
sequence or 'T' for timestamp.
Parameters:
--------------------------------------------------------------------
index: index of field whose flag is being set
flag: special character
"""
assert len(self.fields)>index
self.fields[index].flag=flag
def generateRecord(self, record):
"""Generate a record. Each value is stored in its respective field.
Parameters:
--------------------------------------------------------------------
record: A 1-D array containing as many values as the number of fields
fields: An object of the class field that specifies the characteristics
of each value in the record
Assertion:
--------------------------------------------------------------------
len(record)==len(fields): A value for each field must be specified.
Replace missing values of any type by
SENTINEL_VALUE_FOR_MISSING_DATA
This method supports external classes but not combination of classes.
"""
    if record is not None:
      assert(len(record)==len(self.fields))
      for x in range(len(self.fields)):
        self.fields[x].addValue(record[x])
else:
for field in self.fields:
field.addValue(field.dataClass.getNext())
def generateRecords(self, records):
"""Generate multiple records. Refer to definition for generateRecord"""
if self.verbosity>0: print 'Generating', len(records), 'records...'
for record in records:
self.generateRecord(record)
def getRecord(self, n=None):
"""Returns the nth record"""
if n is None:
assert len(self.fields)>0
n = self.fields[0].numRecords-1
assert (all(field.numRecords>n for field in self.fields))
record = [field.values[n] for field in self.fields]
return record
def getAllRecords(self):
"""Returns all the records"""
values=[]
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
for x in range(numRecords):
values.append(self.getRecord(x))
return values
def encodeRecord(self, record, toBeAdded=True):
"""Encode a record as a sparse distributed representation
Parameters:
--------------------------------------------------------------------
record: Record to be encoded
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields
"""
encoding=[self.fields[i].encodeValue(record[i], toBeAdded) for i in \
xrange(len(self.fields))]
return encoding
def encodeAllRecords(self, records=None, toBeAdded=True):
"""Encodes a list of records.
Parameters:
--------------------------------------------------------------------
records: One or more records. (i,j)th element of this 2D array
specifies the value at field j of record i.
If unspecified, records previously generated and stored are
used.
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields
"""
if records is None:
records = self.getAllRecords()
if self.verbosity>0: print 'Encoding', len(records), 'records.'
encodings = [self.encodeRecord(record, toBeAdded) for record in records]
return encodings
def addValueToField(self, i, value=None):
"""Add 'value' to the field i.
Parameters:
--------------------------------------------------------------------
value: value to be added
i: value is added to field i
"""
assert(len(self.fields)>i)
if value is None:
value = self.fields[i].dataClass.getNext()
self.fields[i].addValue(value)
return value
else: self.fields[i].addValue(value)
def addValuesToField(self, i, numValues):
"""Add values to the field i."""
assert(len(self.fields)>i)
values = [self.addValueToField(i) for n in range(numValues)]
return values
def getSDRforValue(self, i, j):
"""Returns the sdr for jth value at column i"""
assert len(self.fields)>i
assert self.fields[i].numRecords>j
encoding = self.fields[i].encodings[j]
return encoding
def getZeroedOutEncoding(self, n):
"""Returns the nth encoding with the predictedField zeroed out"""
assert all(field.numRecords>n for field in self.fields)
encoding = np.concatenate([field.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)\
if field.isPredictedField else field.encodings[n] for field in self.fields])
return encoding
def getTotaln(self):
"""Returns the cumulative n for all the fields in the dataset"""
n = sum([field.n for field in self.fields])
return n
def getTotalw(self):
"""Returns the cumulative w for all the fields in the dataset"""
w = sum([field.w for field in self.fields])
return w
def getEncoding(self, n):
"""Returns the nth encoding"""
assert (all(field.numEncodings>n for field in self.fields))
encoding = np.concatenate([field.encodings[n] for field in self.fields])
return encoding
def getAllEncodings(self):
"""Returns encodings for all the records"""
numEncodings=self.fields[0].numEncodings
assert (all(field.numEncodings==numEncodings for field in self.fields))
encodings = [self.getEncoding(index) for index in range(numEncodings)]
return encodings
def getAllFieldNames(self):
"""Returns all field names"""
names = [field.name for field in self.fields]
return names
def getAllFlags(self):
"""Returns flags for all fields"""
flags = [field.flag for field in self.fields]
return flags
def getAllDataTypes(self):
"""Returns data types for all fields"""
dataTypes = [field.dataType for field in self.fields]
return dataTypes
def getFieldDescriptions(self):
"""Returns descriptions for all fields"""
descriptions = [field.getDescription() for field in self.fields]
return descriptions
def saveRecords(self, path='myOutput'):
"""Export all the records into a csv file in numenta format.
Example header format:
fieldName1 fieldName2 fieldName3
date string float
T S
Parameters:
--------------------------------------------------------------------
path: Relative path of the file to which the records are to be exported
"""
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
import csv
with open(path+'.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(self.getAllFieldNames())
writer.writerow(self.getAllDataTypes())
writer.writerow(self.getAllFlags())
writer.writerows(self.getAllRecords())
if self.verbosity>0:
print '******', numRecords,'records exported in numenta format to file:',\
path,'******\n'
def removeAllRecords(self):
"""Deletes all the values in the dataset"""
for field in self.fields:
field.encodings, field.values=[], []
field.numRecords, field.numEncodings= (0, 0)
class _field():
def __init__(self, name, encoderSpec):
"""Initialize a field with various parameters such as n, w, flag, dataType,
encoderType, and tag predicted field."""
self.name=name
#Default values
self.n, self.w = (100, 15)
self.encoderType,self.dataType,self.dataClassName = (None, None, None)
self.flag=''
self.isPredictedField=False
if encoderSpec is not None:
if 'n' in encoderSpec: self.n = encoderSpec.pop('n')
if 'w' in encoderSpec: self.w = encoderSpec.pop('w')
if 'flag' in encoderSpec: self.flag = encoderSpec.pop('flag')
if 'isPredictedField' in encoderSpec: self.isPredictedField\
= encoderSpec.pop('isPredictedField')
if 'dataClass' in encoderSpec: self.dataClass \
= encoderSpec.pop('dataClass')
if 'dataClassName' in encoderSpec: self.dataClassName \
= encoderSpec.pop('dataClassName')
if 'dataType' in encoderSpec: self.dataType = encoderSpec.pop('dataType')
if 'encoderType' in encoderSpec: self.encoderType \
= encoderSpec.pop('encoderType')
# ==========================================================================
# Setting up the encoders
if self.dataType is None and self.encoderType is None:
raise RuntimeError('At least one of dataType and encoderType must be specified')
assert(self.dataType is not None or self.encoderType is not None)
if self.dataType is None or self.encoderType is None:
self._setTypes(encoderSpec)
self._initializeEncoders(encoderSpec)
self.encodings=[]
self.values=[]
self.numRecords=0
self.numEncodings=0
def getDescription(self):
description = dict(n=self.n, w=self.w, flag=self.flag, isPredictedField=\
self.isPredictedField, dataClass=self.dataClassName, encoderType= \
self.encoderType, numRecords=self.numRecords, numEncodings=self.numEncodings)
return description
def addValues(self, values):
"""Add values to the field"""
for v in values:
self.addValue(v)
def addValue(self, value):
"""Add value to the field"""
self.values.append(value)
self.numRecords+=1
def encodeValue(self, value, toBeAdded=True):
"""Value is encoded as a sdr using the encoding parameters of the Field"""
encodedValue = np.array(self.encoder.encode(value), dtype=realDType)
if toBeAdded:
self.encodings.append(encodedValue)
self.numEncodings+=1
return encodedValue
def _setTypes(self, encoderSpec):
"""Set up the dataTypes and initialize encoders"""
if self.encoderType is None:
if self.dataType in ['int','float']:
self.encoderType='adaptiveScalar'
elif self.dataType=='string':
self.encoderType='category'
elif self.dataType in ['date', 'datetime']:
self.encoderType='date'
if self.dataType is None:
if self.encoderType in ['scalar','adaptiveScalar']:
self.dataType='float'
elif self.encoderType in ['category', 'enumeration']:
self.dataType='string'
elif self.encoderType in ['date', 'datetime']:
self.dataType='datetime'
def _initializeEncoders(self, encoderSpec):
""" Initialize the encoders"""
#Initializing scalar encoder
if self.encoderType in ['adaptiveScalar', 'scalar']:
if 'minval' in encoderSpec:
self.minval = encoderSpec.pop('minval')
else: self.minval=None
if 'maxval' in encoderSpec:
self.maxval = encoderSpec.pop('maxval')
else: self.maxval = None
self.encoder=adaptive_scalar.AdaptiveScalarEncoder(name='AdaptiveScalarEncoder', \
w=self.w, n=self.n, minval=self.minval, maxval=self.maxval, periodic=False, forced=True)
#Initializing category encoder
elif self.encoderType=='category':
self.encoder=sdr_category.SDRCategoryEncoder(name='categoryEncoder', \
w=self.w, n=self.n)
#Initializing date encoder
elif self.encoderType in ['date', 'datetime']:
self.encoder=date.DateEncoder(name='dateEncoder')
else:
raise RuntimeError('Error in constructing class object. Either encoder type'
'or dataType must be specified')
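# Minimal usage sketch (added for illustration; the field name and encoder parameters
# below are assumptions consistent with the constructor arguments handled above, not an
# official NuPIC example):
#
#   dg = DataGenerator(name='demo', seed=42)
#   idx = dg.defineField('value', encoderParams={'encoderType': 'adaptiveScalar',
#                                                'n': 121, 'w': 21,
#                                                'minval': 0, 'maxval': 100})
#   for v in [12.0, 47.5, 80.3]:
#     dg.addValueToField(idx, v)
#   encodings = dg.encodeAllRecords()   # list with one entry per record, holding per-field SDRs
#   dg.saveRecords('demo_dataset')      # writes demo_dataset.csv in numenta format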
| agpl-3.0 | -5,151,167,198,078,947,000 | -55,954,148,055,888,730 | 31.5 | 145 | 0.621477 | false |
umlaeute/tto-oracle | OracleClient.py | 1 | 1756 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2015, IOhannes m zmölnig, forum::für::umläute
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
import requests
import json
if '__main__' == __name__:
def test_json(payload):
print("payload: %s" % (payload))
j=json.dumps(payload)
print("JSON : %s" % (j))
b=bytes(j, 'utf-8')
print("bytes : %s" % (b))
J=json.loads(b.decode())
print("JSON : %s" % (J))
D=J['comments']
print("data : %s (%s)" % (D, type(D)))
URL='http://localhost:8000'
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', type=str, default=URL, help='connection URL for oracle server (default: %s)' % URL)
parser.add_argument('text', nargs='+', help='some text you want to enter')
args=parser.parse_args()
payload={'comments': args.text}
#test_json(payload)
r = requests.post(args.url, data=json.dumps(payload))
print(r.text)
#t=o.speak(inputtext="The artist is stupid!", nouns=["oracle", "situtation"], adjectives=["solid", "nice"], truncate=True)
#print(ot.array2text(t))
| gpl-2.0 | 2,667,768,923,235,967,500 | -577,469,204,350,202,900 | 37.086957 | 126 | 0.648402 | false |
kseistrup/qtile | test/scripts/window.py | 16 | 3845 | # Copyright (c) 2011 Florian Mounier
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python
"""
This program is carefully crafted to exercise a number of corner-cases in
Qtile.
"""
from __future__ import print_function
import sys
import time
import xcffib
import xcffib.xproto
def configure(window):
window.configure(
width=100,
height=100,
x=0,
y=0,
border_width=1,
)
for i in range(20):
try:
conn = xcffib.connect(display=sys.argv[1])
except xcffib.ConnectionException:
time.sleep(0.1)
continue
except Exception as v:
print("Error opening test window: ", type(v), v, file=sys.stderr)
sys.exit(1)
break
else:
print("Could not open window on display %s" % (sys.argv[1]), file=sys.stderr)
sys.exit(1)
screen = conn.get_setup().roots[conn.pref_screen]
window = conn.generate_id()
background = conn.core.AllocColor(screen.default_colormap, 0x2828, 0x8383, 0xCECE).reply().pixel # Color "#2883ce"
conn.core.CreateWindow(xcffib.CopyFromParent, window, screen.root,
100, 100, 100, 100, 1,
xcffib.xproto.WindowClass.InputOutput, screen.root_visual,
xcffib.xproto.CW.BackPixel | xcffib.xproto.CW.EventMask,
[background, xcffib.xproto.EventMask.StructureNotify | xcffib.xproto.EventMask.Exposure])
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, xcffib.xproto.Atom.WM_NAME,
xcffib.xproto.Atom.STRING, 8, len(sys.argv[2]),
sys.argv[2])
wm_protocols = "WM_PROTOCOLS"
wm_protocols = conn.core.InternAtom(0, len(wm_protocols), wm_protocols).reply().atom
wm_delete_window = "WM_DELETE_WINDOW"
wm_delete_window = conn.core.InternAtom(0, len(wm_delete_window), wm_delete_window).reply().atom
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, wm_protocols,
xcffib.xproto.Atom.ATOM, 32, 1,
[wm_delete_window])
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
conn.core.MapWindow(window)
conn.flush()
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
try:
while 1:
event = conn.wait_for_event()
if event.__class__ == xcffib.xproto.ClientMessageEvent:
if conn.core.GetAtomName(event.data.data32[0]).reply().name.to_string() == "WM_DELETE_WINDOW":
sys.exit(1)
except xcffib.XcffibException:
pass
| mit | 1,958,328,167,587,739,100 | 8,909,799,523,500,635,000 | 36.696078 | 114 | 0.704811 | false |
KyoungRan/Django_React_ex | Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/pip/_vendor/lockfile/pidlockfile.py | 536 | 6090 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <[email protected]>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import errno
import os
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
>>> lock = PIDLockFile('somefile')
>>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False, timeout=None):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False, timeout)
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
try:
write_pid_to_pidfile(self.path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if time.time() > end_time:
if timeout is not None and timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout is not None and timeout / 10 or 0.1)
else:
raise LockFailed("failed to create %s" % self.path)
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
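# A minimal usage sketch for the PIDLockFile class above (the path and
# timeout values are illustrative, not taken from this file):
#
#     lock = PIDLockFile('/var/run/myapp.pid', timeout=5)
#     lock.acquire()          # writes the current PID to the file
#     try:
#         pass                # ... do work while holding the lock ...
#     finally:
#         lock.release()      # removes the PID file again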
def read_pid_from_pidfile(pidfile_path):
""" Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file cannot be read, or if the content is
not a valid PID, return ``None``.
"""
pid = None
try:
pidfile = open(pidfile_path, 'r')
except IOError:
pass
else:
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character.
#
# Programs that read PID files should be somewhat flexible
# in what they accept; i.e., they should ignore extra
# whitespace, leading zeroes, absence of the trailing
# newline, or additional lines in the PID file.
line = pidfile.readline().strip()
try:
pid = int(line)
except ValueError:
pass
pidfile.close()
return pid
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
"""
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = 0o644
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
pidfile.write("%s\n" % pid)
pidfile.close()
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
| mit | -990,126,776,945,131,100 | -4,635,907,127,496,904,000 | 30.848168 | 75 | 0.58047 | false |
Elandril/SickRage | lib/synchronousdeluge/rencode.py | 156 | 12982 |
"""
rencode -- Web safe object pickling/unpickling.
Public domain, Connelly Barnes 2006-2007.
The rencode module is a modified version of bencode from the
BitTorrent project. For complex, heterogeneous data structures with
many small elements, r-encodings take up significantly less space than
b-encodings:
>>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
13
>>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
26
The rencode format is not standardized, and may change with different
rencode module versions, so you should check that you are using the
same rencode version throughout your project.
"""
__version__ = '1.0.1'
__all__ = ['dumps', 'loads']
# Original bencode module by Petru Paler, et al.
#
# Modifications by Connelly Barnes:
#
# - Added support for floats (sent as 32-bit or 64-bit in network
# order), bools, None.
# - Allowed dict keys to be of any serializable type.
# - Lists/tuples are always decoded as tuples (thus, tuples can be
# used as dict keys).
# - Embedded extra information in the 'typecodes' to save some space.
# - Added a restriction on integer length, so that malicious hosts
# cannot pass us large integers which take a long time to decode.
#
# Licensed by Bram Cohen under the "MIT license":
#
# "Copyright (C) 2001-2002 Bram Cohen
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# The Software is provided "AS IS", without warranty of any kind,
# express or implied, including but not limited to the warranties of
# merchantability, fitness for a particular purpose and
# noninfringement. In no event shall the authors or copyright holders
# be liable for any claim, damages or other liability, whether in an
# action of contract, tort or otherwise, arising from, out of or in
# connection with the Software or the use or other dealings in the
# Software."
#
# (The rencode module is licensed under the above license as well).
#
import struct
import string
from threading import Lock
# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
# Maximum length of integer when written as base 10 string.
MAX_INT_LENGTH = 64
# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = chr(59)
CHR_DICT = chr(60)
CHR_INT = chr(61)
CHR_INT1 = chr(62)
CHR_INT2 = chr(63)
CHR_INT4 = chr(64)
CHR_INT8 = chr(65)
CHR_FLOAT32 = chr(66)
CHR_FLOAT64 = chr(44)
CHR_TRUE = chr(67)
CHR_FALSE = chr(68)
CHR_NONE = chr(69)
CHR_TERM = chr(127)
# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
INT_POS_FIXED_COUNT = 44
# Dictionaries with length embedded in typecode.
DICT_FIXED_START = 102
DICT_FIXED_COUNT = 25
# Negative integers with value embedded in typecode.
INT_NEG_FIXED_START = 70
INT_NEG_FIXED_COUNT = 32
# Strings with length embedded in typecode.
STR_FIXED_START = 128
STR_FIXED_COUNT = 64
# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START+STR_FIXED_COUNT
LIST_FIXED_COUNT = 64
def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
if newf - f >= MAX_INT_LENGTH:
raise ValueError('overflow')
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f+1])[0], f+1)
def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f+2])[0], f+2)
def decode_intl(x, f):
f += 1
return (struct.unpack('!l', x[f:f+4])[0], f+4)
def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f+8])[0], f+8)
def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f+4])[0]
return (n, f+4)
def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f+8])[0]
return (n, f+8)
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
s = x[colon:colon+n]
try:
t = s.decode("utf8")
if len(t) != len(s):
s = t
except UnicodeDecodeError:
pass
return (s, colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != CHR_TERM:
v, f = decode_func[x[f]](x, f)
r.append(v)
return (tuple(r), f + 1)
def decode_dict(x, f):
r, f = {}, f+1
while x[f] != CHR_TERM:
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
def decode_true(x, f):
return (True, f+1)
def decode_false(x, f):
return (False, f+1)
def decode_none(x, f):
return (None, f+1)
decode_func = {}
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
decode_func[CHR_LIST ] = decode_list
decode_func[CHR_DICT ] = decode_dict
decode_func[CHR_INT ] = decode_int
decode_func[CHR_INT1 ] = decode_intb
decode_func[CHR_INT2 ] = decode_inth
decode_func[CHR_INT4 ] = decode_intl
decode_func[CHR_INT8 ] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE ] = decode_true
decode_func[CHR_FALSE ] = decode_false
decode_func[CHR_NONE ] = decode_none
def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
s = x[f+1:f+1+slen]
try:
t = s.decode("utf8")
if len(t) != len(s):
s = t
except UnicodeDecodeError:
pass
return (s, f+1+slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[chr(STR_FIXED_START+i)] = make_decoder(i)
make_fixed_length_string_decoders()
def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f+1
for i in range(slen):
v, f = decode_func[x[f]](x, f)
r.append(v)
return (tuple(r), f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[chr(LIST_FIXED_START+i)] = make_decoder(i)
make_fixed_length_list_decoders()
def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f+1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[chr(INT_POS_FIXED_START+i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[chr(INT_NEG_FIXED_START+i)] = make_decoder(-1-i)
make_fixed_length_int_decoders()
def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f+1
for j in range(slen):
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[chr(DICT_FIXED_START+i)] = make_decoder(i)
make_fixed_length_dict_decoders()
# NOTE: this definition of encode_dict is overridden by the fuller version
# defined later in this module (the later one handles the fixed-length case).
def encode_dict(x, r):
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
def loads(x):
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType, FloatType, NoneType, UnicodeType
def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(chr(INT_POS_FIXED_START+x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(chr(INT_NEG_FIXED_START-1-x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
r.extend((CHR_INT2, struct.pack('!h', x)))
elif -2147483648 <= x < 2147483648:
r.extend((CHR_INT4, struct.pack('!l', x)))
elif -9223372036854775808 <= x < 9223372036854775808:
r.extend((CHR_INT8, struct.pack('!q', x)))
else:
s = str(x)
if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))
def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))
def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))
def encode_bool(x, r):
r.extend({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])
def encode_none(x, r):
r.extend(CHR_NONE)
def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((chr(STR_FIXED_START + len(x)), x))
else:
r.extend((str(len(x)), ':', x))
def encode_unicode(x, r):
encode_string(x.encode("utf8"), r)
def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
r.append(chr(LIST_FIXED_START + len(x)))
for i in x:
encode_func[type(i)](i, r)
else:
r.append(CHR_LIST)
for i in x:
encode_func[type(i)](i, r)
r.append(CHR_TERM)
def encode_dict(x,r):
if len(x) < DICT_FIXED_COUNT:
r.append(chr(DICT_FIXED_START + len(x)))
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
else:
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
encode_func = {}
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
encode_func[NoneType] = encode_none
encode_func[UnicodeType] = encode_unicode
lock = Lock()
try:
from types import BooleanType
encode_func[BooleanType] = encode_bool
except ImportError:
pass
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
lock.acquire()
try:
if float_bits == 32:
encode_func[FloatType] = encode_float32
elif float_bits == 64:
encode_func[FloatType] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
finally:
lock.release()
return ''.join(r)
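# Illustrative round-trip through dumps()/loads() above (values are
# arbitrary; note that lists are always decoded back as tuples):
#
#     encoded = dumps({'a': 0, 'b': [1, 2], 'c': 99})
#     decoded = loads(encoded)
#     # decoded == {'a': 0, 'b': (1, 2), 'c': 99}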
def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
L = (({'a':15, 'bb':f1, 'ccc':f2, '':(f3,(),False,True,'')},('a',10**20),tuple(range(-100000,100000)),'b'*31,'b'*62,'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0),)
assert loads(dumps(L)) == L
d = dict(zip(range(-100000,100000),range(-100000,100000)))
d.update({'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True, True:False})
L = (d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: ''})
assert loads(dumps(L)) == L
L = ('', 'a'*10, 'a'*100, 'a'*1000, 'a'*10000, 'a'*100000, 'a'*1000000, 'a'*10000000)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(n))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(-n,0))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple([tuple(range(n)) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + (None,True,None)
assert loads(dumps(L)) == L
assert loads(dumps(None)) == None
assert loads(dumps({None:None})) == {None:None}
assert 1e-10<abs(loads(dumps(1.1))-1.1)<1e-6
assert 1e-10<abs(loads(dumps(1.1,32))-1.1)<1e-6
assert abs(loads(dumps(1.1,64))-1.1)<1e-12
assert loads(dumps(u"Hello World!!"))
try:
import psyco
psyco.bind(dumps)
psyco.bind(loads)
except ImportError:
pass
if __name__ == '__main__':
test()
| gpl-3.0 | 7,386,158,493,093,203,000 | 6,824,872,589,906,531,000 | 28.981524 | 205 | 0.60245 | false |
dongjoon-hyun/tensorflow | tensorflow/contrib/framework/python/framework/tensor_util.py | 25 | 11885 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
# Temporary for backwards compatibility
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
    Scalar tensor with the reduced sum of `tensors`.
  Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
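# A minimal sketch of reduce_sum_n() above (tensor names are assumptions):
#
#     total = reduce_sum_n([t1, t2, t3])
#     # equivalent to tf.add_n([tf.reduce_sum(t1), tf.reduce_sum(t2),
#     #                         tf.reduce_sum(t3)])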
@deprecated(
None, "Please switch to remove_squeezable_dimensions from "
"tf.confusion_matrix. Note that the order of the inputs and outputs of "
"labels and predictions have also been switched.")
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
def _shape_tensor_compatible(expected_shape, actual_shape):
"""Returns whether actual_shape is compatible with expected_shape.
Note that -1 in `expected_shape` is recognized as unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_shape: Shape of the tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('shape_tensor_equal',
values=[expected_shape, actual_shape]) as scope:
return math_ops.reduce_all(
math_ops.logical_or(
math_ops.equal(expected_shape, -1),
math_ops.equal(expected_shape, actual_shape, 'equal'),
name='exclude_partial_shape'),
name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Note that -1 in `expected_shape` is recognized as unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _shape_tensor_compatible(expected_shape, actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Note that unknown dimension in `expected_shape` will be ignored.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
if (isinstance(expected_shape, tensor_shape.TensorShape)
and not expected_shape.is_fully_defined()):
expected_shape = [d if d else -1 for d in expected_shape.as_list()]
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
The original tensor argument, possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
If tensor shape and expected_shape, are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if tensor_util.is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if (not actual_shape.is_fully_defined()
or tensor_util.is_tensor(expected_shape)):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if (not tensor_util.is_tensor(expected_shape)
and (len(expected_shape) < 1)):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not tensor_util.is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not tensor_util.is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
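# Illustrative use of with_shape() above (variable names are assumptions):
#
#     predictions = with_shape([batch_size, num_classes], predictions)
#     # If the static shape is known it is checked immediately; otherwise an
#     # assert op is added and the shape is set on the returned tensor.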
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of integer type.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if not data_type.base_dtype.is_integer:
raise ValueError('Expected integer type for %s, received type: %s.'
% (tensor.name, data_type))
return check_ops.assert_scalar(tensor, name=name_scope)
| apache-2.0 | 2,375,079,795,465,155,600 | 7,584,968,777,978,719,000 | 36.257053 | 80 | 0.680353 | false |
aitjcize/PyTox | tools/apicomplete.py | 2 | 1029 | # simple script to test the completeness of the python bindings:
from sys import version_info
if version_info[0] < 3:
from urllib import urlopen
else:
from urllib.request import urlopen
import re
TOXURL =\
"https://raw.githubusercontent.com/irungentoo/toxcore/master/toxcore/tox.h"
PYTOXURL =\
"https://raw.githubusercontent.com/aitjcize/PyTox/master/pytox/core.c"
# NOTE: this second assignment overrides the master-branch URL above, so the
# newapi branch bindings are the ones actually checked.
PYTOXURL =\
    "https://raw.githubusercontent.com/kitech/PyTox/newapi/pytox/core.c"
toxsrc = urlopen(TOXURL).read()
pytoxsrc = urlopen(PYTOXURL).read()
res = None
if version_info[0] < 3:
res = re.findall(r"\n[_a-z0-9]+ (tox_[\_a-z]+\()", str(toxsrc))
else:
res = re.findall(r'[_a-z0-9]+ (tox_[\_a-z]+\()', str(toxsrc))
incl = 0
excl = []
for function in res:
if function in str(pytoxsrc):
incl += 1
else:
excl.append(function)
print(
"PyTox includes %d out of %d functions found in tox.h" % (incl, len(res))
)
print("Not included are the functions:")
for item in excl:
print(" %s" % item[:-1])
| gpl-3.0 | 5,636,197,229,929,050,000 | -7,713,660,764,447,791,000 | 21.866667 | 79 | 0.656948 | false |
pierreg/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/models/nn.py | 190 | 1567 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple baseline feed-forward neural network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class NN(hybrid_model.HybridModel):
"""A simple baseline feed-forward neural network."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(NN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [fully_connected.FullyConnectedLayer(
params, 0, device_assigner=device_assigner)]
| apache-2.0 | -6,961,056,713,245,747,000 | -4,270,119,844,735,706,000 | 38.175 | 81 | 0.682195 | false |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/pycoverage/coverage/execfile.py | 209 | 5865 | """Execute files of Python code."""
import imp, marshal, os, sys
from coverage.backward import exec_code_object, open_source
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
try:
# In Py 2.x, the builtins were in __builtin__
BUILTINS = sys.modules['__builtin__']
except KeyError:
# In Py 3.x, they're in builtins
BUILTINS = sys.modules['builtins']
def rsplit1(s, sep):
"""The same as s.rsplit(sep, 1), but works in 2.3"""
parts = s.split(sep)
return sep.join(parts[:-1]), parts[-1]
def run_python_module(modulename, args):
"""Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
"""
openfile = None
glo, loc = globals(), locals()
try:
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if '.' in modulename:
packagename, name = rsplit1(modulename, '.')
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource(
"module does not live in a file: %r" % modulename
)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = '__main__'
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError:
_, err, _ = sys.exc_info()
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
# Finally, hand the file off to run_python_file for execution.
pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename)
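# Illustrative call of run_python_module() above, roughly equivalent to
# ``python -m json.tool input.json`` (module and argument values are made up):
#
#     run_python_module('json.tool', ['json.tool', 'input.json'])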
def run_python_file(filename, args, package=None):
"""Run a python file as if it were the main program on the command line.
`filename` is the path to the file to execute, it need not be a .py file.
`args` is the argument array to present as sys.argv, including the first
element naming the file being executed. `package` is the name of the
enclosing package, if any.
"""
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
main_mod = imp.new_module('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
if package:
main_mod.__package__ = package
main_mod.__builtins__ = BUILTINS
# Set sys.argv properly.
old_argv = sys.argv
sys.argv = args
try:
# Make a code object somehow.
if filename.endswith(".pyc") or filename.endswith(".pyo"):
code = make_code_from_pyc(filename)
else:
code = make_code_from_py(filename)
# Execute the code object.
try:
exec_code_object(code, main_mod.__dict__)
except SystemExit:
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel two layers off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
finally:
# Restore the old __main__
sys.modules['__main__'] = old_main_mod
# Restore the old argv and path
sys.argv = old_argv
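# Illustrative call of run_python_file() above (path and arguments are
# hypothetical); args[0] is conventionally the script path itself:
#
#     run_python_file('/tmp/script.py', ['/tmp/script.py', '--verbose'])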
def make_code_from_py(filename):
"""Get source from `filename` and make a code object of it."""
# Open the source file.
try:
source_file = open_source(filename)
except IOError:
raise NoSource("No file to run: %r" % filename)
try:
source = source_file.read()
finally:
source_file.close()
# We have the source. `compile` still needs the last line to be clean,
# so make sure it is, then compile a code object from it.
if not source or source[-1] != '\n':
source += '\n'
code = compile(source, filename, "exec")
return code
def make_code_from_pyc(filename):
"""Get a code object from a .pyc file."""
try:
fpyc = open(filename, "rb")
except IOError:
raise NoCode("No file to run: %r" % filename)
try:
# First four bytes are a version-specific magic number. It has to
# match or we won't run the file.
magic = fpyc.read(4)
if magic != imp.get_magic():
raise NoCode("Bad magic number in .pyc file")
# Skip the junk in the header that we don't need.
fpyc.read(4) # Skip the moddate.
if sys.version_info >= (3, 3):
# 3.3 added another long to the header (size), skip it.
fpyc.read(4)
# The rest of the file is the code object we want.
code = marshal.load(fpyc)
finally:
fpyc.close()
return code
| bsd-3-clause | -6,307,698,303,451,635,000 | 7,453,742,694,611,647,000 | 33.298246 | 79 | 0.58312 | false |
mrquim/mrquimrepo | repo/script.module.kodi65/lib/kodi65/youtube.py | 4 | 9607 | # -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <[email protected]>
# This program is Free Software see LICENSE file for details
import urllib
import itertools
from kodi65 import utils
from kodi65 import VideoItem
from kodi65 import ItemList
YT_KEY = 'AIzaSyB-BOZ_o09NLVwq_lMskvvj1olDkFI4JK0'
BASE_URL = "https://www.googleapis.com/youtube/v3/"
PLUGIN_BASE = "plugin://script.extendedinfo/?info="
def handle_videos(results, extended=False):
"""
    process video API results into an ItemList
"""
videos = ItemList(content_type="videos")
for item in results:
snippet = item["snippet"]
thumb = snippet["thumbnails"]["high"]["url"] if "thumbnails" in snippet else ""
try:
video_id = item["id"]["videoId"]
except Exception:
video_id = snippet["resourceId"]["videoId"]
video = VideoItem(label=snippet["title"],
path=PLUGIN_BASE + 'youtubevideo&&id=%s' % video_id)
video.set_infos({'plot': snippet["description"],
'mediatype': "video",
'premiered': snippet["publishedAt"][:10]})
video.set_artwork({'thumb': thumb})
video.set_playable(True)
video.set_properties({'channel_title': snippet["channelTitle"],
'channel_id': snippet["channelId"],
'type': "video",
'youtube_id': video_id})
videos.append(video)
if not extended:
return videos
params = {"part": "contentDetails,statistics",
"id": ",".join([i.get_property("youtube_id") for i in videos])}
ext_results = get_data(method="videos",
params=params)
if not ext_results:
return videos
for item in videos:
for ext_item in ext_results["items"]:
if not item.get_property("youtube_id") == ext_item['id']:
continue
details = ext_item['contentDetails']
stats = ext_item['statistics']
likes = stats.get('likeCount')
dislikes = stats.get('dislikeCount')
item.update_infos({"duration": get_duration_in_seconds(details['duration'])})
props = {"duration": details['duration'][2:].lower(),
"formatted_duration": get_formatted_duration(details['duration']),
"dimension": details['dimension'],
"definition": details['definition'],
"caption": details['caption'],
"viewcount": utils.millify(stats['viewCount']),
"likes": likes,
"dislikes": dislikes}
item.update_properties(props)
if likes and dislikes:
vote_count = int(likes) + int(dislikes)
if vote_count > 0:
item.set_info("rating", round(float(likes) / vote_count * 10, 1))
break
return videos
def get_duration_in_seconds(duration):
"""
convert youtube duration string to seconds int
"""
duration = duration[2:-1].replace("H", "M").split("M")
if len(duration) == 3:
return int(duration[0]) * 3600 + int(duration[1]) * 60 + int(duration[2])
elif len(duration) == 2:
return int(duration[0]) * 60 + int(duration[1])
else:
return int(duration[0])
def get_formatted_duration(duration):
"""
convert youtube duration string to formatted duration
"""
duration = duration[2:-1].replace("H", "M").split("M")
if len(duration) == 3:
return "{}:{}:{}".format(duration[0].zfill(2), duration[1].zfill(2), duration[2].zfill(2))
elif len(duration) == 2:
return "{}:{}".format(duration[0].zfill(2), duration[1].zfill(2))
else:
return "00:{}".format(duration[0].zfill(2))
def handle_playlists(results):
"""
process playlist api result to ItemList
"""
playlists = ItemList(content_type="videos")
for item in results:
snippet = item["snippet"]
thumb = snippet["thumbnails"]["high"]["url"] if "thumbnails" in snippet else ""
try:
playlist_id = item["id"]["playlistId"]
except Exception:
playlist_id = snippet["resourceId"]["playlistId"]
playlist = VideoItem(label=snippet["title"],
path=PLUGIN_BASE + 'youtubeplaylist&&id=%s' % playlist_id)
playlist.set_infos({'plot': snippet["description"],
"mediatype": "video",
'premiered': snippet["publishedAt"][:10]})
playlist.set_art("thumb", thumb)
playlist.set_properties({'youtube_id': playlist_id,
'channel_title': snippet["channelTitle"],
'type': "playlist",
'live': snippet["liveBroadcastContent"].replace("none", "")})
playlists.append(playlist)
params = {"id": ",".join([i.get_property("youtube_id") for i in playlists]),
"part": "contentDetails"}
ext_results = get_data(method="playlists",
params=params)
for item, ext_item in itertools.product(playlists, ext_results["items"]):
if item.get_property("youtube_id") == ext_item['id']:
item.set_property("itemcount", ext_item['contentDetails']['itemCount'])
return playlists
def handle_channels(results):
"""
process channel api result to ItemList
"""
channels = ItemList(content_type="videos")
for item in results:
snippet = item["snippet"]
thumb = snippet["thumbnails"]["high"]["url"] if "thumbnails" in snippet else ""
try:
channel_id = item["id"]["channelId"]
except Exception:
channel_id = snippet["resourceId"]["channelId"]
channel = VideoItem(label=snippet["title"],
path=PLUGIN_BASE + 'youtubechannel&&id=%s' % channel_id)
channel.set_infos({'plot': snippet["description"],
'mediatype': "video",
'premiered': snippet["publishedAt"][:10]})
channel.set_art("thumb", thumb)
channel.set_properties({"youtube_id": channel_id,
"type": "channel"})
channels.append(channel)
channel_ids = [item.get_property("youtube_id") for item in channels]
params = {"id": ",".join(channel_ids),
"part": "contentDetails,statistics,brandingSettings"}
ext_results = get_data(method="channels",
params=params)
for item, ext_item in itertools.product(channels, ext_results["items"]):
if item.get_property("youtube_id") == ext_item['id']:
item.set_property("itemcount", ext_item['statistics']['videoCount'])
item.set_art("fanart", ext_item["brandingSettings"]["image"].get("bannerTvMediumImageUrl"))
return channels
def get_data(method, params=None, cache_days=0.5):
"""
fetch data from youtube API
"""
params = params if params else {}
params["key"] = YT_KEY
params = {k: unicode(v).encode('utf-8') for k, v in params.iteritems() if v}
url = "{base_url}{method}?{params}".format(base_url=BASE_URL,
method=method,
params=urllib.urlencode(params))
return utils.get_JSON_response(url=url,
cache_days=cache_days,
folder="YouTube")
def search(search_str="", hd="", orderby="relevance", limit=40, extended=True, page="", filters=None, media_type="video"):
"""
returns ItemList according to search term, filters etc.
"""
params = {"part": "id,snippet",
"maxResults": limit,
"type": media_type,
"order": orderby,
"pageToken": page,
"hd": str(hd and not hd == "false"),
"q": search_str.replace('"', '')}
results = get_data(method="search",
params=utils.merge_dicts(params, filters if filters else {}))
if not results:
return None
if media_type == "video":
listitems = handle_videos(results["items"], extended=extended)
elif media_type == "playlist":
listitems = handle_playlists(results["items"])
elif media_type == "channel":
listitems = handle_channels(results["items"])
listitems.total_pages = results["pageInfo"]["resultsPerPage"]
listitems.totals = results["pageInfo"]["totalResults"]
listitems.next_page_token = results.get("nextPageToken", "")
listitems.prev_page_token = results.get("prevPageToken", "")
return listitems
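# A minimal sketch of calling search() above (query and limit are arbitrary):
#
#     videos = search("kodi", media_type="video", limit=10)
#     # returns an ItemList of playable VideoItems, with paging tokens set on
#     # videos.next_page_token / videos.prev_page_token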
def get_playlist_videos(playlist_id=""):
"""
returns ItemList from playlist with *playlist_id
"""
if not playlist_id:
return []
params = {"part": "id,snippet",
"maxResults": "50",
"playlistId": playlist_id}
results = get_data(method="playlistItems",
params=params)
if not results:
return []
return handle_videos(results["items"])
def get_user_playlists(username=""):
"""
returns ItemList with user uploads from *username
"""
params = {"part": "contentDetails",
"forUsername": username}
results = get_data(method="channels",
params=params)
if not results["items"]:
return None
return results["items"][0]["contentDetails"]["relatedPlaylists"]
| gpl-2.0 | 6,046,923,330,194,915,000 | 2,065,086,831,062,008,800 | 39.535865 | 122 | 0.558863 | false |
0xffea/keystone | keystone/common/openssl.py | 2 | 8439 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import stat
import subprocess
from keystone.common import logging
from keystone import config
LOG = logging.getLogger(__name__)
CONF = config.CONF
DIR_PERMS = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
CERT_PERMS = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
PRIV_PERMS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
DEFAULT_SUBJECT = '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com'
def file_exists(file_path):
return os.path.exists(file_path)
class ConfigurePKI(object):
"""Generate files for PKI signing using OpenSSL.
Signed tokens require a private key and signing certificate which itself
must be signed by a CA. This class generates them with workable defaults
if each of the files are not present
"""
def __init__(self, keystone_user, keystone_group, **kw):
self.conf_dir = os.path.dirname(CONF.signing.ca_certs)
self.use_keystone_user = keystone_user
self.use_keystone_group = keystone_group
self.ssl_config_file_name = os.path.join(self.conf_dir, "openssl.conf")
self.ca_key_file = os.path.join(self.conf_dir, "cakey.pem")
self.request_file_name = os.path.join(self.conf_dir, "req.pem")
self.ssl_dictionary = {'conf_dir': self.conf_dir,
'ca_cert': CONF.signing.ca_certs,
'ssl_config': self.ssl_config_file_name,
'ca_private_key': self.ca_key_file,
'ca_cert_cn': 'hostname',
'request_file': self.request_file_name,
'signing_key': CONF.signing.keyfile,
'signing_cert': CONF.signing.certfile,
'default_subject': DEFAULT_SUBJECT,
'key_size': int(CONF.signing.key_size),
'valid_days': int(CONF.signing.valid_days),
'ca_password': CONF.signing.ca_password}
def _make_dirs(self, file_name):
dir = os.path.dirname(file_name)
if not file_exists(dir):
os.makedirs(dir, DIR_PERMS)
if os.geteuid() == 0 and self.use_keystone_group:
os.chown(dir, -1, self.use_keystone_group)
def _set_permissions(self, file_name, perms):
os.chmod(file_name, perms)
if os.geteuid() == 0:
os.chown(file_name, self.use_keystone_user or -1,
self.use_keystone_group or -1)
def exec_command(self, command):
to_exec = command % self.ssl_dictionary
LOG.info(to_exec)
subprocess.check_call(to_exec.rsplit(' '))
def build_ssl_config_file(self):
if not file_exists(self.ssl_config_file_name):
self._make_dirs(self.ssl_config_file_name)
ssl_config_file = open(self.ssl_config_file_name, 'w')
ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
ssl_config_file.close()
self._set_permissions(self.ssl_config_file_name, CERT_PERMS)
index_file_name = os.path.join(self.conf_dir, 'index.txt')
if not file_exists(index_file_name):
index_file = open(index_file_name, 'w')
index_file.write('')
index_file.close()
self._set_permissions(self.ssl_config_file_name, PRIV_PERMS)
serial_file_name = os.path.join(self.conf_dir, 'serial')
if not file_exists(serial_file_name):
index_file = open(serial_file_name, 'w')
index_file.write('01')
index_file.close()
self._set_permissions(self.ssl_config_file_name, PRIV_PERMS)
def build_ca_cert(self):
if not file_exists(CONF.signing.ca_certs):
if not os.path.exists(self.ca_key_file):
self._make_dirs(self.ca_key_file)
self.exec_command('openssl genrsa -out %(ca_private_key)s '
'%(key_size)d -config %(ssl_config)s')
self._set_permissions(self.ssl_dictionary['ca_private_key'],
stat.S_IRUSR)
self.exec_command('openssl req -new -x509 -extensions v3_ca '
'-passin pass:%(ca_password)s '
'-key %(ca_private_key)s -out %(ca_cert)s '
'-days %(valid_days)d '
'-config %(ssl_config)s '
'-subj %(default_subject)s')
self._set_permissions(self.ssl_dictionary['ca_cert'], CERT_PERMS)
def build_private_key(self):
signing_keyfile = self.ssl_dictionary['signing_key']
if not file_exists(signing_keyfile):
self._make_dirs(signing_keyfile)
self.exec_command('openssl genrsa -out %(signing_key)s '
'%(key_size)d '
'-config %(ssl_config)s')
self._set_permissions(os.path.dirname(signing_keyfile), PRIV_PERMS)
self._set_permissions(signing_keyfile, stat.S_IRUSR)
def build_signing_cert(self):
if not file_exists(CONF.signing.certfile):
self._make_dirs(CONF.signing.certfile)
self.exec_command('openssl req -key %(signing_key)s -new -nodes '
'-out %(request_file)s -config %(ssl_config)s '
'-subj %(default_subject)s')
self.exec_command('openssl ca -batch -out %(signing_cert)s '
'-config %(ssl_config)s '
'-infiles %(request_file)s')
def run(self):
self.build_ssl_config_file()
self.build_ca_cert()
self.build_private_key()
self.build_signing_cert()
sslconfig = """
# OpenSSL configuration file.
#
# Establish working directory.
dir = %(conf_dir)s
[ ca ]
default_ca = CA_default
[ CA_default ]
new_certs_dir = $dir
serial = $dir/serial
database = $dir/index.txt
certificate = %(ca_cert)s
private_key = %(ca_private_key)s
default_days = 365
default_md = md5
preserve = no
email_in_dn = no
nameopt = default_ca
certopt = default_ca
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 1024 # Size of keys
default_keyfile = key.pem # name of generated keys
default_md = md5 # message digest algorithm
string_mask = nombstr # permitted characters
distinguished_name = req_distinguished_name
req_extensions = v3_req
[ req_distinguished_name ]
0.organizationName = Organization Name (company)
organizationalUnitName = Organizational Unit Name (department, division)
emailAddress = Email Address
emailAddress_max = 40
localityName = Locality Name (city, district)
stateOrProvinceName = State or Province Name (full name)
countryName = Country Name (2 letter code)
countryName_min = 2
countryName_max = 2
commonName = Common Name (hostname, IP, or your name)
commonName_max = 64
# Default values for the above, for consistency and less typing.
0.organizationName_default = Openstack, Inc
localityName_default = Undefined
stateOrProvinceName_default = Undefined
countryName_default = US
commonName_default = %(ca_cert_cn)s
[ v3_ca ]
basicConstraints = CA:TRUE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer:always
[ v3_req ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash"""
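# Example of driving the class above from a setup script (a sketch; passing
# None for the user/group simply skips the chown step, and output paths come
# from the signing options in keystone.conf):
#
#     conf = ConfigurePKI(keystone_user=None, keystone_group=None)
#     conf.run()   # builds openssl.conf, the CA, the signing key and cert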
| apache-2.0 | 8,077,664,316,750,960,000 | -678,493,062,371,888,800 | 37.889401 | 79 | 0.594857 | false |
pombredanne/pants | src/python/pants/backend/codegen/wire/java/java_wire_library.py | 9 | 2834 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.base.validation import assert_list
logger = logging.getLogger(__name__)
class JavaWireLibrary(ExportableJvmLibrary):
"""A Java library generated from Wire IDL files.
Supports Wire 1.x only.
For an example Wire 2.x interface that generates service stubs see:
https://github.com/ericzundel/mvn2pants/tree/master/src/python/squarepants/plugins/sake_wire_codegen
But note this requires you to write a custom wire code generator with a command line interface.
:API: public
"""
def __init__(self,
payload=None,
service_writer=None,
service_writer_options=None,
roots=None,
registry_class=None,
enum_options=None,
no_options=None,
**kwargs):
"""
:param string service_writer: the name of the class to pass as the --service_writer option to
the Wire compiler (For wire 1.0 only)
:param list service_writer_options: A list of options to pass to the service writer (For
wire 1.x only)
:param list roots: passed through to the --roots option of the Wire compiler
:param string registry_class: fully qualified class name of RegistryClass to create. If in
doubt, specify com.squareup.wire.SimpleServiceWriter
    :param list enum_options: list of enums to pass as the --enum-enum_options option (optional)
:param boolean no_options: boolean that determines if --no_options flag is passed
"""
if not service_writer and service_writer_options:
raise TargetDefinitionException(self,
'service_writer_options requires setting service_writer')
payload = payload or Payload()
payload.add_fields({
'service_writer': PrimitiveField(service_writer or None),
'service_writer_options': PrimitiveField(
assert_list(service_writer_options, key_arg='service_writer_options',
raise_type=TargetDefinitionException)),
'roots': PrimitiveField(roots or []),
'registry_class': PrimitiveField(registry_class or None),
'enum_options': PrimitiveField(enum_options or []),
'no_options': PrimitiveField(no_options or False),
})
super(JavaWireLibrary, self).__init__(payload=payload, **kwargs)
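# A hypothetical BUILD file stanza using this target type (names and values
# are illustrative, not taken from any real repository):
#
#     java_wire_library(
#       name='example-wire',
#       sources=['example.proto'],
#       service_writer='com.squareup.wire.SimpleServiceWriter',
#     )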
| apache-2.0 | -3,840,293,330,298,915,300 | 8,229,217,739,109,712,000 | 39.485714 | 102 | 0.694425 | false |
graphite-server/carbon | lib/carbon/conf.py | 4 | 19985 | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import sys
import pwd
import errno
from os.path import join, dirname, normpath, exists, isdir
from optparse import OptionParser
from ConfigParser import ConfigParser
import whisper
from carbon import log
from carbon.exceptions import CarbonConfigException
from twisted.python import usage
defaults = dict(
USER="",
MAX_CACHE_SIZE=float('inf'),
MAX_UPDATES_PER_SECOND=500,
MAX_CREATES_PER_MINUTE=float('inf'),
LINE_RECEIVER_INTERFACE='0.0.0.0',
LINE_RECEIVER_PORT=2003,
ENABLE_UDP_LISTENER=False,
UDP_RECEIVER_INTERFACE='0.0.0.0',
UDP_RECEIVER_PORT=2003,
PICKLE_RECEIVER_INTERFACE='0.0.0.0',
PICKLE_RECEIVER_PORT=2004,
CACHE_QUERY_INTERFACE='0.0.0.0',
CACHE_QUERY_PORT=7002,
LOG_UPDATES=True,
LOG_CACHE_HITS=True,
WHISPER_AUTOFLUSH=False,
WHISPER_SPARSE_CREATE=False,
WHISPER_FALLOCATE_CREATE=False,
WHISPER_LOCK_WRITES=False,
MAX_DATAPOINTS_PER_MESSAGE=500,
MAX_AGGREGATION_INTERVALS=5,
MAX_QUEUE_SIZE=1000,
QUEUE_LOW_WATERMARK_PCT=0.8,
TIME_TO_DEFER_SENDING=0.0001,
ENABLE_AMQP=False,
AMQP_VERBOSE=False,
BIND_PATTERNS=['#'],
ENABLE_MANHOLE=False,
MANHOLE_INTERFACE='127.0.0.1',
MANHOLE_PORT=7222,
MANHOLE_USER="",
MANHOLE_PUBLIC_KEY="",
RELAY_METHOD='rules',
REPLICATION_FACTOR=1,
DESTINATIONS=[],
USE_FLOW_CONTROL=True,
USE_INSECURE_UNPICKLER=False,
USE_WHITELIST=False,
CARBON_METRIC_PREFIX='carbon',
CARBON_METRIC_INTERVAL=60,
CACHE_WRITE_STRATEGY='sorted',
WRITE_BACK_FREQUENCY=None,
MIN_RESET_STAT_FLOW=1000,
MIN_RESET_RATIO=0.9,
MIN_RESET_INTERVAL=121,
USE_RATIO_RESET=False,
LOG_LISTENER_CONN_SUCCESS=True,
LOG_AGGREGATOR_MISSES=True,
AGGREGATION_RULES='aggregation-rules.conf',
REWRITE_RULES='rewrite-rules.conf',
RELAY_RULES='relay-rules.conf',
ENABLE_LOGROTATE=True,
)
def _umask(value):
return int(value, 8)
def _process_alive(pid):
if exists("/proc"):
return exists("/proc/%d" % pid)
else:
try:
os.kill(int(pid), 0)
return True
except OSError, err:
return err.errno == errno.EPERM
class OrderedConfigParser(ConfigParser):
"""Hacky workaround to ensure sections are always returned in the order
they are defined in. Note that this does *not* make any guarantees about
the order of options within a section or the order in which sections get
written back to disk on write()."""
_ordered_sections = []
def read(self, path):
# Verifies a file exists *and* is readable
if not os.access(path, os.R_OK):
raise CarbonConfigException("Error: Missing config file or wrong perms on %s" % path)
result = ConfigParser.read(self, path)
sections = []
for line in open(path):
line = line.strip()
if line.startswith('[') and line.endswith(']'):
sections.append( line[1:-1] )
self._ordered_sections = sections
return result
def sections(self):
return list( self._ordered_sections ) # return a copy for safety
class Settings(dict):
__getattr__ = dict.__getitem__
def __init__(self):
dict.__init__(self)
self.update(defaults)
def readFrom(self, path, section):
parser = ConfigParser()
if not parser.read(path):
raise CarbonConfigException("Failed to read config file %s" % path)
if not parser.has_section(section):
return
for key,value in parser.items(section):
key = key.upper()
# Detect type from defaults dict
if key in defaults:
valueType = type( defaults[key] )
else:
valueType = str
if valueType is list:
value = [ v.strip() for v in value.split(',') ]
elif valueType is bool:
value = parser.getboolean(section, key)
else:
# Attempt to figure out numeric types automatically
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
self[key] = value
settings = Settings()
settings.update(defaults)
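# Illustrative example (added for clarity; not part of the original carbon
# source). Given a carbon.conf section such as
#
#   [cache]
#   MAX_CACHE_SIZE = 2000000
#   USE_FLOW_CONTROL = False
#   BIND_PATTERNS = a.#, b.#
#
# Settings.readFrom('carbon.conf', 'cache') coerces each value using the type
# of the matching entry in `defaults`: BIND_PATTERNS (a list there) is split
# on commas, USE_FLOW_CONTROL (a bool) goes through getboolean(), and other
# values fall back to the int/float auto-detection, so MAX_CACHE_SIZE ends up
# as the integer 2000000. The sample section contents above are assumptions
# chosen only to illustrate the coercion rules.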
class CarbonCacheOptions(usage.Options):
optFlags = [
["debug", "", "Run in debug mode."],
]
optParameters = [
["config", "c", None, "Use the given config file."],
["instance", "", "a", "Manage a specific carbon instance."],
["logdir", "", None, "Write logs to the given directory."],
["whitelist", "", None, "List of metric patterns to allow."],
["blacklist", "", None, "List of metric patterns to disallow."],
]
def postOptions(self):
global settings
program = self.parent.subCommand
# Use provided pidfile (if any) as default for configuration. If it's
# set to 'twistd.pid', that means no value was provided and the default
# was used.
pidfile = self.parent["pidfile"]
if pidfile.endswith("twistd.pid"):
pidfile = None
self["pidfile"] = pidfile
# Enforce a default umask of '022' if none was set.
if not self.parent.has_key("umask") or self.parent["umask"] is None:
self.parent["umask"] = 022
# Read extra settings from the configuration file.
program_settings = read_config(program, self)
settings.update(program_settings)
settings["program"] = program
# Normalize and expand paths
settings["STORAGE_DIR"] = os.path.normpath(os.path.expanduser(settings["STORAGE_DIR"]))
settings["LOCAL_DATA_DIR"] = os.path.normpath(os.path.expanduser(settings["LOCAL_DATA_DIR"]))
settings["WHITELISTS_DIR"] = os.path.normpath(os.path.expanduser(settings["WHITELISTS_DIR"]))
settings["PID_DIR"] = os.path.normpath(os.path.expanduser(settings["PID_DIR"]))
settings["LOG_DIR"] = os.path.normpath(os.path.expanduser(settings["LOG_DIR"]))
settings["pidfile"] = os.path.normpath(os.path.expanduser(settings["pidfile"]))
# Set process uid/gid by changing the parent config, if a user was
# provided in the configuration file.
if settings.USER:
self.parent["uid"], self.parent["gid"] = (
pwd.getpwnam(settings.USER)[2:4])
# Set the pidfile in parent config to the value that was computed by
# C{read_config}.
self.parent["pidfile"] = settings["pidfile"]
storage_schemas = join(settings["CONF_DIR"], "storage-schemas.conf")
if not exists(storage_schemas):
print "Error: missing required config %s" % storage_schemas
sys.exit(1)
if settings.WHISPER_AUTOFLUSH:
log.msg("Enabling Whisper autoflush")
whisper.AUTOFLUSH = True
if settings.WHISPER_FALLOCATE_CREATE:
if whisper.CAN_FALLOCATE:
log.msg("Enabling Whisper fallocate support")
else:
log.err("WHISPER_FALLOCATE_CREATE is enabled but linking failed.")
if settings.WHISPER_LOCK_WRITES:
if whisper.CAN_LOCK:
log.msg("Enabling Whisper file locking")
whisper.LOCK = True
else:
log.err("WHISPER_LOCK_WRITES is enabled but import of fcntl module failed.")
if not "action" in self:
self["action"] = "start"
self.handleAction()
# If we are not running in debug mode or non-daemon mode, then log to a
# directory, otherwise log output will go to stdout. If parent options
# are set to log to syslog, then use that instead.
if not self["debug"]:
if self.parent.get("syslog", None):
log.logToSyslog(self.parent["prefix"])
elif not self.parent["nodaemon"]:
logdir = settings.LOG_DIR
if not isdir(logdir):
os.makedirs(logdir)
if settings.USER:
# We have not yet switched to the specified user,
# but that user must be able to create files in this
# directory.
os.chown(logdir, self.parent["uid"], self.parent["gid"])
log.logToDir(logdir)
if self["whitelist"] is None:
self["whitelist"] = join(settings["CONF_DIR"], "whitelist.conf")
settings["whitelist"] = self["whitelist"]
if self["blacklist"] is None:
self["blacklist"] = join(settings["CONF_DIR"], "blacklist.conf")
settings["blacklist"] = self["blacklist"]
def parseArgs(self, *action):
"""If an action was provided, store it for further processing."""
if len(action) == 1:
self["action"] = action[0]
def handleAction(self):
"""Handle extra argument for backwards-compatibility.
* C{start} will simply do minimal pid checking and otherwise let twistd
take over.
* C{stop} will kill an existing running process if it matches the
C{pidfile} contents.
* C{status} will simply report if the process is up or not.
"""
action = self["action"]
pidfile = self.parent["pidfile"]
program = settings["program"]
instance = self["instance"]
if action == "stop":
if not exists(pidfile):
print "Pidfile %s does not exist" % pidfile
raise SystemExit(0)
pf = open(pidfile, 'r')
try:
pid = int(pf.read().strip())
pf.close()
except IOError:
print "Could not read pidfile %s" % pidfile
raise SystemExit(1)
print "Sending kill signal to pid %d" % pid
try:
os.kill(pid, 15)
except OSError, e:
if e.errno == errno.ESRCH:
print "No process with pid %d running" % pid
else:
raise
raise SystemExit(0)
elif action == "status":
if not exists(pidfile):
print "%s (instance %s) is not running" % (program, instance)
raise SystemExit(1)
pf = open(pidfile, "r")
try:
pid = int(pf.read().strip())
pf.close()
except IOError:
print "Failed to read pid from %s" % pidfile
raise SystemExit(1)
if _process_alive(pid):
print ("%s (instance %s) is running with pid %d" %
(program, instance, pid))
raise SystemExit(0)
else:
print "%s (instance %s) is not running" % (program, instance)
raise SystemExit(1)
elif action == "start":
if exists(pidfile):
pf = open(pidfile, 'r')
try:
pid = int(pf.read().strip())
pf.close()
except IOError:
print "Could not read pidfile %s" % pidfile
raise SystemExit(1)
if _process_alive(pid):
print ("%s (instance %s) is already running with pid %d" %
(program, instance, pid))
raise SystemExit(1)
else:
print "Removing stale pidfile %s" % pidfile
try:
os.unlink(pidfile)
except IOError:
print "Could not remove pidfile %s" % pidfile
# Try to create the PID directory
else:
if not os.path.exists(settings["PID_DIR"]):
try:
os.makedirs(settings["PID_DIR"])
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(settings["PID_DIR"]):
pass
else:
raise
print "Starting %s (instance %s)" % (program, instance)
else:
print "Invalid action '%s'" % action
print "Valid actions: start stop status"
raise SystemExit(1)
class CarbonAggregatorOptions(CarbonCacheOptions):
optParameters = [
["rules", "", None, "Use the given aggregation rules file."],
["rewrite-rules", "", None, "Use the given rewrite rules file."],
] + CarbonCacheOptions.optParameters
def postOptions(self):
CarbonCacheOptions.postOptions(self)
if self["rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['AGGREGATION_RULES'])
settings["aggregation-rules"] = self["rules"]
if self["rewrite-rules"] is None:
self["rewrite-rules"] = join(settings["CONF_DIR"],
settings['REWRITE_RULES'])
settings["rewrite-rules"] = self["rewrite-rules"]
class CarbonRelayOptions(CarbonCacheOptions):
optParameters = [
["rules", "", None, "Use the given relay rules file."],
["aggregation-rules", "", None, "Use the given aggregation rules file."],
] + CarbonCacheOptions.optParameters
def postOptions(self):
CarbonCacheOptions.postOptions(self)
if self["rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['RELAY_RULES'])
settings["relay-rules"] = self["rules"]
if self["aggregation-rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['AGGREGATION_RULES'])
settings["aggregation-rules"] = self["rules"]
if settings["RELAY_METHOD"] not in ("rules", "consistent-hashing", "aggregated-consistent-hashing"):
print ("In carbon.conf, RELAY_METHOD must be either 'rules' or "
"'consistent-hashing' or 'aggregated-consistent-hashing'. Invalid value: '%s'" %
settings.RELAY_METHOD)
sys.exit(1)
def get_default_parser(usage="%prog [options] <start|stop|status>"):
"""Create a parser for command line options."""
parser = OptionParser(usage=usage)
parser.add_option(
"--debug", action="store_true",
help="Run in the foreground, log to stdout")
parser.add_option(
"--nodaemon", action="store_true",
help="Run in the foreground")
parser.add_option(
"--profile",
help="Record performance profile data to the given file")
parser.add_option(
"--pidfile", default=None,
help="Write pid to the given file")
parser.add_option(
"--umask", default=None,
help="Use the given umask when creating files")
parser.add_option(
"--config",
default=None,
help="Use the given config file")
parser.add_option(
"--whitelist",
default=None,
help="Use the given whitelist file")
parser.add_option(
"--blacklist",
default=None,
help="Use the given blacklist file")
parser.add_option(
"--logdir",
default=None,
help="Write logs in the given directory")
parser.add_option(
"--instance",
default='a',
help="Manage a specific carbon instance")
return parser
def get_parser(name):
parser = get_default_parser()
if name == "carbon-aggregator":
parser.add_option(
"--rules",
default=None,
help="Use the given aggregation rules file.")
parser.add_option(
"--rewrite-rules",
default=None,
help="Use the given rewrite rules file.")
elif name == "carbon-relay":
parser.add_option(
"--rules",
default=None,
help="Use the given relay rules file.")
return parser
def parse_options(parser, args):
"""
Parse command line options and print usage message if no arguments were
provided for the command.
"""
(options, args) = parser.parse_args(args)
if not args:
parser.print_usage()
raise SystemExit(1)
if args[0] not in ("start", "stop", "status"):
parser.print_usage()
raise SystemExit(1)
return options, args
def read_config(program, options, **kwargs):
"""
Read settings for 'program' from configuration file specified by
'options["config"]', with missing values provided by 'defaults'.
"""
settings = Settings()
settings.update(defaults)
# Initialize default values if not set yet.
for name, value in kwargs.items():
settings.setdefault(name, value)
graphite_root = kwargs.get("ROOT_DIR")
if graphite_root is None:
graphite_root = os.environ.get('GRAPHITE_ROOT')
if graphite_root is None:
raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT "
"needs to be provided.")
  # Default config directory to root-relative, unless overridden by the
# 'GRAPHITE_CONF_DIR' environment variable.
settings.setdefault("CONF_DIR",
os.environ.get("GRAPHITE_CONF_DIR",
join(graphite_root, "conf")))
if options["config"] is None:
options["config"] = join(settings["CONF_DIR"], "carbon.conf")
else:
# Set 'CONF_DIR' to the parent directory of the 'carbon.conf' config
# file.
settings["CONF_DIR"] = dirname(normpath(options["config"]))
  # Storage directory can be overridden by the 'GRAPHITE_STORAGE_DIR'
# environment variable. It defaults to a path relative to GRAPHITE_ROOT
# for backwards compatibility though.
settings.setdefault("STORAGE_DIR",
os.environ.get("GRAPHITE_STORAGE_DIR",
join(graphite_root, "storage")))
# By default, everything is written to subdirectories of the storage dir.
settings.setdefault(
"PID_DIR", settings["STORAGE_DIR"])
settings.setdefault(
"LOG_DIR", join(settings["STORAGE_DIR"], "log", program))
settings.setdefault(
"LOCAL_DATA_DIR", join(settings["STORAGE_DIR"], "whisper"))
settings.setdefault(
"WHITELISTS_DIR", join(settings["STORAGE_DIR"], "lists"))
# Read configuration options from program-specific section.
section = program[len("carbon-"):]
config = options["config"]
if not exists(config):
raise CarbonConfigException("Error: missing required config %r" % config)
settings.readFrom(config, section)
settings.setdefault("instance", options["instance"])
# If a specific instance of the program is specified, augment the settings
# with the instance-specific settings and provide sane defaults for
# optional settings.
if options["instance"]:
settings.readFrom(config,
"%s:%s" % (section, options["instance"]))
settings["pidfile"] = (
options["pidfile"] or
join(settings["PID_DIR"], "%s-%s.pid" %
(program, options["instance"])))
settings["LOG_DIR"] = (options["logdir"] or
join(settings["LOG_DIR"],
"%s-%s" % (program ,options["instance"])))
else:
settings["pidfile"] = (
options["pidfile"] or
join(settings["PID_DIR"], '%s.pid' % program))
settings["LOG_DIR"] = (options["logdir"] or settings["LOG_DIR"])
return settings
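# Illustrative usage sketch (added for clarity; not part of the original
# module). A plain dict stands in for the twisted usage.Options object that
# CarbonCacheOptions.postOptions passes in above, and the paths assume a
# standard /opt/graphite install with conf/carbon.conf present.
#
#   import os
#   os.environ.setdefault("GRAPHITE_ROOT", "/opt/graphite")
#   opts = {"config": None, "instance": "a", "pidfile": None, "logdir": None}
#   cache_settings = read_config("carbon-cache", opts)
#   print cache_settings["LOCAL_DATA_DIR"]  # /opt/graphite/storage/whisper
#   print cache_settings["pidfile"]         # .../storage/carbon-cache-a.pid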
| apache-2.0 | 3,357,551,525,680,887,300 | 5,912,646,919,063,420,000 | 33.39759 | 108 | 0.587491 | false |
einstein95/crunchy-xml-decoder | crunchy-xml-decoder/unidecode/x0fd.py | 252 | 3764 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 | 4,846,498,851,918,886,000 | 4,323,732,839,395,671,000 | 13.645914 | 16 | 0.272051 | false |
beiko-lab/gengis | bin/Lib/test/test_tuple.py | 13 | 5569 | from test import test_support, seq_tests
import gc
class TupleTest(seq_tests.CommonTest):
type2test = tuple
def test_constructors(self):
super(TupleTest, self).test_constructors()
# calling built-in types without argument must return empty
self.assertEqual(tuple(), ())
t0_3 = (0, 1, 2, 3)
t0_3_bis = tuple(t0_3)
self.assertTrue(t0_3 is t0_3_bis)
self.assertEqual(tuple([]), ())
self.assertEqual(tuple([0, 1, 2, 3]), (0, 1, 2, 3))
self.assertEqual(tuple(''), ())
self.assertEqual(tuple('spam'), ('s', 'p', 'a', 'm'))
def test_truth(self):
super(TupleTest, self).test_truth()
self.assertTrue(not ())
self.assertTrue((42, ))
def test_len(self):
super(TupleTest, self).test_len()
self.assertEqual(len(()), 0)
self.assertEqual(len((0,)), 1)
self.assertEqual(len((0, 1, 2)), 3)
def test_iadd(self):
super(TupleTest, self).test_iadd()
u = (0, 1)
u2 = u
u += (2, 3)
self.assertTrue(u is not u2)
def test_imul(self):
super(TupleTest, self).test_imul()
u = (0, 1)
u2 = u
u *= 3
self.assertTrue(u is not u2)
def test_tupleresizebug(self):
# Check that a specific bug in _PyTuple_Resize() is squashed.
def f():
for i in range(1000):
yield i
self.assertEqual(list(tuple(f())), range(1000))
def test_hash(self):
# See SF bug 942952: Weakness in tuple hash
# The hash should:
# be non-commutative
# should spread-out closely spaced values
# should not exhibit cancellation in tuples like (x,(x,y))
# should be distinct from element hashes: hash(x)!=hash((x,))
# This test exercises those cases.
# For a pure random hash and N=50, the expected number of occupied
# buckets when tossing 252,600 balls into 2**32 buckets
# is 252,592.6, or about 7.4 expected collisions. The
# standard deviation is 2.73. On a box with 64-bit hash
# codes, no collisions are expected. Here we accept no
# more than 15 collisions. Any worse and the hash function
# is sorely suspect.
N=50
base = range(N)
xp = [(i, j) for i in base for j in base]
inps = base + [(i, j) for i in base for j in xp] + \
[(i, j) for i in xp for j in base] + xp + zip(base)
collisions = len(inps) - len(set(map(hash, inps)))
self.assertTrue(collisions <= 15)
def test_repr(self):
l0 = tuple()
l2 = (0, 1, 2)
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), repr(l0))
self.assertEqual(str(a2), repr(l2))
self.assertEqual(repr(a0), "()")
self.assertEqual(repr(a2), "(0, 1, 2)")
def _not_tracked(self, t):
# Nested tuples can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@test_support.cpython_only
def test_track_literals(self):
# Test GC-optimization of tuple literals
x, y, z = 1.5, "a", []
self._not_tracked(())
self._not_tracked((1,))
self._not_tracked((1, 2))
self._not_tracked((1, 2, "a"))
self._not_tracked((1, 2, (None, True, False, ()), int))
self._not_tracked((object(),))
self._not_tracked(((1, x), y, (2, 3)))
# Tuples with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked(([],))
self._tracked(([1],))
self._tracked(({},))
self._tracked((set(),))
self._tracked((x, y, z))
def check_track_dynamic(self, tp, always_track):
x, y, z = 1.5, "a", []
check = self._tracked if always_track else self._not_tracked
check(tp())
check(tp([]))
check(tp(set()))
check(tp([1, x, y]))
check(tp(obj for obj in [1, x, y]))
check(tp(set([1, x, y])))
check(tp(tuple([obj]) for obj in [1, x, y]))
check(tuple(tp([obj]) for obj in [1, x, y]))
self._tracked(tp([z]))
self._tracked(tp([[x, y]]))
self._tracked(tp([{x: y}]))
self._tracked(tp(obj for obj in [x, y, z]))
self._tracked(tp(tuple([obj]) for obj in [x, y, z]))
self._tracked(tuple(tp([obj]) for obj in [x, y, z]))
@test_support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically constructed tuples.
self.check_track_dynamic(tuple, False)
@test_support.cpython_only
def test_track_subtypes(self):
# Tuple subtypes must always be tracked
class MyTuple(tuple):
pass
self.check_track_dynamic(MyTuple, True)
@test_support.cpython_only
def test_bug7466(self):
# Trying to untrack an unfinished tuple could crash Python
self._not_tracked(tuple(gc.collect() for i in range(101)))
def test_main():
test_support.run_unittest(TupleTest)
if __name__=="__main__":
test_main()
| gpl-3.0 | -1,250,727,118,984,941,300 | -4,520,708,569,842,560,500 | 32.590062 | 75 | 0.532412 | false |
lcy-seso/models | globally_normalized_reader/model.py | 4 | 14339 | #!/usr/bin/env python
#coding=utf-8
import paddle.v2 as paddle
from paddle.v2.layer import parse_network
import basic_modules
from config import ModelConfig
__all__ = ["GNR"]
def build_pretrained_embedding(name, data_type, emb_dim, emb_drop=0.):
"""create word a embedding layer which loads pre-trained embeddings.
Arguments:
- name: The name of the data layer which accepts one-hot input.
- data_type: PaddlePaddle's data type for data layer.
- emb_dim: The path to the data files.
"""
return paddle.layer.embedding(
input=paddle.layer.data(
name=name, type=data_type),
size=emb_dim,
param_attr=paddle.attr.Param(
name="GloveVectors", is_static=True),
layer_attr=paddle.attr.ExtraLayerAttribute(drop_rate=emb_drop), )
def encode_question(input_embedding,
lstm_hidden_dim,
depth,
passage_indep_embedding_dim,
prefix=""):
"""build question encoding by using bidirectional LSTM.
Each question word is encoded by runing a stack of bidirectional LSTM over
word embedding in question, producing hidden states. The hidden states are
used to compute a passage-independent question embedding.
The final question encoding is constructed by concatenating the final
hidden states of the forward and backward LSTMs and the passage-independent
embedding.
Arguments:
- input_embedding: The question word embeddings.
- lstm_hidden_dim: The dimension of bi-directional LSTM.
- depth: The depth of stacked bi-directional LSTM.
- passage_indep_embedding_dim: The dimension of passage-independent
embedding.
        - prefix: A string which will be appended to the name of each layer
                  created in this function. Each layer in a network should
                  have a unique name; the prefix allows this function to be
                  called multiple times.
"""
# stacked bi-directional LSTM to process question embeddings.
lstm_final, lstm_outs = basic_modules.stacked_bidirectional_lstm(
input_embedding, lstm_hidden_dim, depth, 0., prefix)
# compute passage-independent embeddings.
candidates = paddle.layer.fc(input=lstm_outs,
bias_attr=False,
size=passage_indep_embedding_dim,
act=paddle.activation.Linear())
weights = paddle.layer.fc(input=lstm_outs,
size=1,
bias_attr=False,
act=paddle.activation.SequenceSoftmax())
weighted_candidates = paddle.layer.scaling(input=candidates, weight=weights)
passage_indep_embedding = paddle.layer.pooling(
input=weighted_candidates, pooling_type=paddle.pooling.Sum())
return paddle.layer.concat(
input=[lstm_final, passage_indep_embedding]), lstm_outs
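# Added note (not in the original source): the passage-independent question
# embedding assembled above is a weighted sum of projected LSTM states,
#
#     q_indep = sum_j softmax_j(w . h_j) * (W h_j)
#
# where h_j is the stacked bi-LSTM state of the j-th question word, W is the
# projection to passage_indep_embedding_dim (the "candidates" layer), and the
# SequenceSoftmax layer normalises the scalar scores w . h_j over the words
# of the question.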
def question_aligned_passage_embedding(question_lstm_outs, document_embeddings,
passage_aligned_embedding_dim):
"""create question aligned passage embedding.
Arguments:
        - question_lstm_outs: The outputs of the LSTM that processes the
                              question word embeddings.
- document_embeddings: The document embeddings.
- passage_aligned_embedding_dim: The dimension of passage aligned
embedding.
"""
def outer_sentence_step(document_embeddings, question_lstm_outs,
passage_aligned_embedding_dim):
"""step function for PaddlePaddle's recurrent_group.
In this function, the original input document_embeddings are scattered
from nested sequence into sequence by recurrent_group in PaddlePaddle.
The step function iterates over each sentence in the document.
Arguments:
- document_embeddings: The word embeddings of the document.
            - question_lstm_outs: The outputs of the LSTM that processes
                                  the question word embeddings.
- passage_aligned_embedding_dim: The dimension of passage aligned
embedding.
"""
def inner_word_step(word_embedding, question_lstm_outs,
question_outs_proj, passage_aligned_embedding_dim):
"""
In this recurrent_group, sentence embedding has been scattered into
word embeddings. The step function iterates over each word in one
sentence in the document.
Arguments:
- word_embedding: The word embeddings of documents.
                - question_lstm_outs: The outputs of the LSTM that processes
                                      the question word embeddings.
- question_outs_proj: The projection of question_lstm_outs
into a new hidden space.
- passage_aligned_embedding_dim: The dimension of passage
aligned embedding.
"""
doc_word_expand = paddle.layer.expand(
input=word_embedding,
expand_as=question_lstm_outs,
expand_level=paddle.layer.ExpandLevel.FROM_NO_SEQUENCE)
weights = paddle.layer.fc(
input=[question_lstm_outs, doc_word_expand],
size=1,
bias_attr=False,
act=paddle.activation.SequenceSoftmax())
weighted_candidates = paddle.layer.scaling(
input=question_outs_proj, weight=weights)
return paddle.layer.pooling(
input=weighted_candidates, pooling_type=paddle.pooling.Sum())
question_outs_proj = paddle.layer.fc(input=question_lstm_outs,
bias_attr=False,
size=passage_aligned_embedding_dim)
return paddle.layer.recurrent_group(
input=[
paddle.layer.SubsequenceInput(document_embeddings),
paddle.layer.StaticInput(question_lstm_outs),
paddle.layer.StaticInput(question_outs_proj),
passage_aligned_embedding_dim,
],
step=inner_word_step,
name="iter_over_word")
return paddle.layer.recurrent_group(
input=[
paddle.layer.SubsequenceInput(document_embeddings),
paddle.layer.StaticInput(question_lstm_outs),
passage_aligned_embedding_dim
],
step=outer_sentence_step,
name="iter_over_sen")
def encode_documents(input_embedding, same_as_question, question_vector,
question_lstm_outs, passage_indep_embedding_dim, prefix):
"""Build the final question-aware document embeddings.
    Each word in the document is represented as the concatenation of its word
    vector, the question vector, boolean features indicating if a word appears
    in the question or is repeated, and a question-aligned embedding.
Arguments:
- input_embedding: The word embeddings of the document.
- same_as_question: The boolean features indicating if a word appears
in the question or is repeated.
        - question_vector: The final question encoding.
        - question_lstm_outs: The per-word outputs of the question LSTM.
        - passage_indep_embedding_dim: The dimension of the passage-independent
                                       embedding.
        - prefix: The prefix which will be appended to the name of each layer
                  created in this function.
"""
question_expanded = paddle.layer.expand(
input=question_vector,
expand_as=input_embedding,
expand_level=paddle.layer.ExpandLevel.FROM_NO_SEQUENCE)
question_aligned_embedding = question_aligned_passage_embedding(
question_lstm_outs, input_embedding, passage_indep_embedding_dim)
return paddle.layer.concat(input=[
input_embedding, question_expanded, same_as_question,
question_aligned_embedding
])
def search_answer(doc_lstm_outs, sentence_idx, start_idx, end_idx, config,
is_infer):
"""Search the answer from the document.
The search process for this layer begins with searching a target sequence
from a nested sequence by using paddle.layer.kmax_seq_score and
paddle.layer.sub_nested_seq_layer. In the first search step, top beam size
sequences with highest scores, indices of these top k sequences in the
original nested sequence, and the ground truth (also called gold)
altogether (a triple) make up of the first beam.
Then, start and end positions are searched. In these searches, top k
positions with highest scores are selected, and then sequence, starting
from the selected starts till ends of the sequences are taken to search
next by using paddle.layer.seq_slice.
Finally, the layer paddle.layer.cross_entropy_over_beam takes all the beam
expansions which contain several candidate targets found along the
three-step search. cross_entropy_over_beam calculates cross entropy over
the expanded beams which all the candidates in the beam as the normalized
factor.
Note that, if gold falls off the beam at search step t, then the cost is
calculated over the beam at step t.
Arguments:
        - doc_lstm_outs: The outputs of the LSTM that processes the document
                         words.
- sentence_idx: Ground-truth indicating sentence index of the answer
in the document.
- start_idx: Ground-truth indicating start span index of the answer
in the sentence.
- end_idx: Ground-truth indicating end span index of the answer
in the sentence.
- is_infer: The boolean parameter indicating inferring or training.
"""
last_state_of_sentence = paddle.layer.last_seq(
input=doc_lstm_outs, agg_level=paddle.layer.AggregateLevel.TO_SEQUENCE)
sentence_scores = paddle.layer.fc(input=last_state_of_sentence,
size=1,
bias_attr=False,
act=paddle.activation.Linear())
topk_sentence_ids = paddle.layer.kmax_seq_score(
input=sentence_scores, beam_size=config.beam_size)
topk_sen = paddle.layer.sub_nested_seq(
input=doc_lstm_outs, selected_indices=topk_sentence_ids)
# expand beam to search start positions on selected sentences
start_pos_scores = paddle.layer.fc(
input=topk_sen,
size=1,
layer_attr=paddle.attr.ExtraLayerAttribute(
error_clipping_threshold=5.0),
bias_attr=False,
act=paddle.activation.Linear())
topk_start_pos_ids = paddle.layer.kmax_seq_score(
input=start_pos_scores, beam_size=config.beam_size)
topk_start_spans = paddle.layer.seq_slice(
input=topk_sen, starts=topk_start_pos_ids, ends=None)
# expand beam to search end positions on selected start spans
_, end_span_embedding = basic_modules.stacked_bidirectional_lstm(
topk_start_spans, config.lstm_hidden_dim, config.lstm_depth,
config.lstm_hidden_droprate, "__end_span_embeddings__")
end_pos_scores = paddle.layer.fc(input=end_span_embedding,
size=1,
bias_attr=False,
act=paddle.activation.Linear())
topk_end_pos_ids = paddle.layer.kmax_seq_score(
input=end_pos_scores, beam_size=config.beam_size)
if is_infer:
return [
sentence_scores, topk_sentence_ids, start_pos_scores,
topk_start_pos_ids, end_pos_scores, topk_end_pos_ids
]
else:
return paddle.layer.cross_entropy_over_beam(input=[
paddle.layer.BeamInput(sentence_scores, topk_sentence_ids,
sentence_idx),
paddle.layer.BeamInput(start_pos_scores, topk_start_pos_ids,
start_idx),
paddle.layer.BeamInput(end_pos_scores, topk_end_pos_ids, end_idx)
])
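# Added summary (not in the original source): with beam_size = k the three
# chained searches above expand the beam roughly as
#   document (nested seq) --kmax_seq_score--> top-k sentences
#   each kept sentence    --kmax_seq_score--> top-k start positions, with
#                           seq_slice keeping the span from each start to
#                           the end of its sentence
#   each kept start span  --kmax_seq_score--> top-k end positions
# cross_entropy_over_beam then normalises over all candidates kept in the
# beam; if the gold answer falls off the beam at step t, the cost is taken
# over the beam at step t (see the docstring above).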
def GNR(config, is_infer=False):
"""Build the globally normalized reader model.
Arguments:
- config: The model configuration.
- is_infer: The boolean parameter indicating inferring or training.
"""
# encode question words
question_embeddings = build_pretrained_embedding(
"question",
paddle.data_type.integer_value_sequence(config.vocab_size),
config.embedding_dim, config.embedding_droprate)
question_vector, question_lstm_outs = encode_question(
question_embeddings, config.lstm_hidden_dim, config.lstm_depth,
config.passage_indep_embedding_dim, "__ques")
# encode document words
document_embeddings = build_pretrained_embedding(
"documents",
paddle.data_type.integer_value_sub_sequence(config.vocab_size),
config.embedding_dim, config.embedding_droprate)
same_as_question = paddle.layer.data(
name="same_as_question",
type=paddle.data_type.dense_vector_sub_sequence(1))
document_words_ecoding = encode_documents(
document_embeddings, same_as_question, question_vector,
question_lstm_outs, config.passage_indep_embedding_dim, "__doc")
doc_lstm_outs = basic_modules.stacked_bidirectional_lstm_by_nested_seq(
document_words_ecoding, config.lstm_depth, config.lstm_hidden_dim,
"__doc_lstm")
# search the answer.
sentence_idx = paddle.layer.data(
name="sen_idx", type=paddle.data_type.integer_value(1))
start_idx = paddle.layer.data(
name="start_idx", type=paddle.data_type.integer_value(1))
end_idx = paddle.layer.data(
name="end_idx", type=paddle.data_type.integer_value(1))
return search_answer(doc_lstm_outs, sentence_idx, start_idx, end_idx,
config, is_infer)
if __name__ == "__main__":
print(parse_network(GNR(ModelConfig)))
| apache-2.0 | 1,345,474,485,290,144,500 | 8,246,834,148,219,362,000 | 43.256173 | 80 | 0.619778 | false |
lostdj/Jaklin-OpenJFX | modules/web/src/main/native/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py | 2 | 9887 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import random
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.bot import irc_command
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.bot.ircbot import IRCBot
from webkitpy.tool.bot.ircbot import Eliza
from webkitpy.tool.bot.sheriff_unittest import MockSheriffBot
from webkitpy.tool.mocktool import MockTool
def run(message):
tool = MockTool()
tool.ensure_irc_connected(None)
bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
bot._message_queue.post(["mock_nick", message])
bot.process_pending_messages()
class IRCBotTest(unittest.TestCase):
def test_eliza(self):
eliza = Eliza()
eliza.execute("tom", "hi", None, None)
eliza.execute("tom", "bye", None, None)
def test_parse_command_and_args(self):
tool = MockTool()
bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
self.assertEqual(bot._parse_command_and_args(""), (Eliza, [""]))
self.assertEqual(bot._parse_command_and_args(" "), (Eliza, [""]))
self.assertEqual(bot._parse_command_and_args(" hi "), (irc_command.Hi, []))
self.assertEqual(bot._parse_command_and_args(" hi there "), (irc_command.Hi, ["there"]))
def test_exception_during_command(self):
tool = MockTool()
tool.ensure_irc_connected(None)
bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
class CommandWithException(object):
def execute(self, nick, args, tool, sheriff):
raise Exception("mock_exception")
bot._parse_command_and_args = lambda request: (CommandWithException, [])
expected_logs = 'MOCK: irc.post: Exception executing command: mock_exception\n'
OutputCapture().assert_outputs(self, bot.process_message, args=["mock_nick", "ignored message"], expected_logs=expected_logs)
class CommandWithException(object):
def execute(self, nick, args, tool, sheriff):
raise KeyboardInterrupt()
bot._parse_command_and_args = lambda request: (CommandWithException, [])
# KeyboardInterrupt and SystemExit are not subclasses of Exception and thus correctly will not be caught.
OutputCapture().assert_outputs(self, bot.process_message, args=["mock_nick", "ignored message"], expected_exception=KeyboardInterrupt)
def test_hi(self):
random.seed(23324)
expected_logs = 'MOCK: irc.post: "Only you can prevent forest fires." -- Smokey the Bear\n'
OutputCapture().assert_outputs(self, run, args=["hi"], expected_logs=expected_logs)
def test_help(self):
expected_logs = 'MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, ping, restart, rollout, whois, yt?\nMOCK: irc.post: mock_nick: Type "mock-sheriff-bot: help COMMAND" for help on my individual commands.\n'
OutputCapture().assert_outputs(self, run, args=["help"], expected_logs=expected_logs)
expected_logs = 'MOCK: irc.post: mock_nick: Usage: hi\nMOCK: irc.post: mock_nick: Responds with hi.\nMOCK: irc.post: mock_nick: Aliases: hello\n'
OutputCapture().assert_outputs(self, run, args=["help hi"], expected_logs=expected_logs)
OutputCapture().assert_outputs(self, run, args=["help hello"], expected_logs=expected_logs)
def test_restart(self):
expected_logs = "MOCK: irc.post: Restarting...\n"
OutputCapture().assert_outputs(self, run, args=["restart"], expected_logs=expected_logs, expected_exception=TerminateQueue)
def test_rollout(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654 This patch broke the world"], expected_logs=expected_logs)
def test_revert(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["revert 21654 This patch broke the world"], expected_logs=expected_logs)
def test_multi_rollout(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654 21655 21656 This 21654 patch broke the world"], expected_logs=expected_logs)
def test_rollout_with_r_in_svn_revision(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout r21654 This patch broke the world"], expected_logs=expected_logs)
def test_multi_rollout_with_r_in_svn_revision(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout r21654 21655 r21656 This r21654 patch broke the world"], expected_logs=expected_logs)
def test_rollout_bananas(self):
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run, args=["rollout bananas"], expected_logs=expected_logs)
def test_rollout_invalidate_revision(self):
# When folks pass junk arguments, we should just spit the usage back at them.
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run,
args=["rollout --component=Tools 21654"],
expected_logs=expected_logs)
def test_rollout_invalidate_reason(self):
# FIXME: I'm slightly confused as to why this doesn't return the USAGE message.
expected_logs = """MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...
MOCK: irc.post: mock_nick, abarth, darin, eseidel: Failed to create rollout patch:
MOCK: irc.post: The rollout reason may not begin with - (\"-bad (Requested by mock_nick on #webkit).\").
"""
OutputCapture().assert_outputs(self, run,
args=["rollout 21654 -bad"],
expected_logs=expected_logs)
def test_multi_rollout_invalidate_reason(self):
expected_logs = """MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...
MOCK: irc.post: mock_nick, abarth, darin, eseidel: Failed to create rollout patch:
MOCK: irc.post: The rollout reason may not begin with - (\"-bad (Requested by mock_nick on #webkit).\").
"""
OutputCapture().assert_outputs(self, run,
args=["rollout "
"21654 21655 r21656 -bad"],
expected_logs=expected_logs)
def test_rollout_no_reason(self):
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654"], expected_logs=expected_logs)
def test_multi_rollout_no_reason(self):
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654 21655 r21656"], expected_logs=expected_logs)
| gpl-2.0 | -4,590,233,262,784,009,000 | -8,300,925,459,514,790,000 | 61.575949 | 297 | 0.694043 | false |
bud4/samba | third_party/waf/wafadmin/Runner.py | 32 | 5555 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"Execute the tasks"
import os, sys, random, time, threading, traceback
try: from Queue import Queue
except ImportError: from queue import Queue
import Build, Utils, Logs, Options
from Logs import debug, error
from Constants import *
GAP = 15
run_old = threading.Thread.run
def run(*args, **kwargs):
try:
run_old(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except:
sys.excepthook(*sys.exc_info())
threading.Thread.run = run
def process_task(tsk):
m = tsk.master
if m.stop:
m.out.put(tsk)
return
try:
tsk.generator.bld.printout(tsk.display())
if tsk.__class__.stat: ret = tsk.__class__.stat(tsk)
# actual call to task's run() function
else: ret = tsk.call_run()
except Exception, e:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
# TODO cleanup
m.error_handler(tsk)
m.out.put(tsk)
return
if ret:
tsk.err_code = ret
tsk.hasrun = CRASHED
else:
try:
tsk.post_run()
except Utils.WafError:
pass
except Exception:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
else:
tsk.hasrun = SUCCESS
if tsk.hasrun != SUCCESS:
m.error_handler(tsk)
m.out.put(tsk)
class TaskConsumer(threading.Thread):
ready = Queue(0)
consumers = []
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(1)
self.start()
def run(self):
try:
self.loop()
except:
pass
def loop(self):
while 1:
tsk = TaskConsumer.ready.get()
process_task(tsk)
class Parallel(object):
"""
keep the consumer threads busy, and avoid consuming cpu cycles
when no more tasks can be added (end of the build, etc)
"""
def __init__(self, bld, j=2):
# number of consumers
self.numjobs = j
self.manager = bld.task_manager
self.manager.current_group = 0
self.total = self.manager.total()
# tasks waiting to be processed - IMPORTANT
self.outstanding = []
self.maxjobs = MAXJOBS
# tasks that are awaiting for another task to complete
self.frozen = []
# tasks returned by the consumers
self.out = Queue(0)
self.count = 0 # tasks not in the producer area
self.processed = 1 # progress indicator
self.stop = False # error condition to stop the build
self.error = False # error flag
def get_next(self):
"override this method to schedule the tasks in a particular order"
if not self.outstanding:
return None
return self.outstanding.pop(0)
def postpone(self, tsk):
"override this method to schedule the tasks in a particular order"
# TODO consider using a deque instead
if random.randint(0, 1):
self.frozen.insert(0, tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
"called to set the next group of tasks"
while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
if self.frozen:
self.outstanding += self.frozen
self.frozen = []
elif not self.count:
(jobs, tmp) = self.manager.get_next_set()
if jobs != None: self.maxjobs = jobs
if tmp: self.outstanding += tmp
break
def get_out(self):
"the tasks that are put to execute are all collected using get_out"
ret = self.out.get()
self.manager.add_finished(ret)
if not self.stop and getattr(ret, 'more_tasks', None):
self.outstanding += ret.more_tasks
self.total += len(ret.more_tasks)
self.count -= 1
def error_handler(self, tsk):
"by default, errors make the build stop (not thread safe so be careful)"
if not Options.options.keep:
self.stop = True
self.error = True
def start(self):
"execute the tasks"
if TaskConsumer.consumers:
# the worker pool is usually loaded lazily (see below)
# in case it is re-used with a different value of numjobs:
while len(TaskConsumer.consumers) < self.numjobs:
TaskConsumer.consumers.append(TaskConsumer())
while not self.stop:
self.refill_task_list()
# consider the next task
tsk = self.get_next()
if not tsk:
if self.count:
# tasks may add new ones after they are run
continue
else:
# no tasks to run, no tasks running, time to exit
break
if tsk.hasrun:
# if the task is marked as "run", just skip it
self.processed += 1
self.manager.add_finished(tsk)
continue
try:
st = tsk.runnable_status()
except Exception, e:
self.processed += 1
if self.stop and not Options.options.keep:
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
continue
self.error_handler(tsk)
self.manager.add_finished(tsk)
tsk.hasrun = EXCEPTION
tsk.err_msg = Utils.ex_stack()
continue
if st == ASK_LATER:
self.postpone(tsk)
elif st == SKIP_ME:
self.processed += 1
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
else:
# run me: put the task in ready queue
tsk.position = (self.processed, self.total)
self.count += 1
tsk.master = self
self.processed += 1
if self.numjobs == 1:
process_task(tsk)
else:
TaskConsumer.ready.put(tsk)
# create the consumer threads only if there is something to consume
if not TaskConsumer.consumers:
TaskConsumer.consumers = [TaskConsumer() for i in xrange(self.numjobs)]
# self.count represents the tasks that have been made available to the consumer threads
# collect all the tasks after an error else the message may be incomplete
while self.error and self.count:
self.get_out()
#print loop
assert (self.count == 0 or self.stop)
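# Illustrative usage sketch (added; not part of the original waf module).
# The build context drives this class roughly as follows -- the exact call
# site lives in Build.py and may differ between waf versions, so treat the
# names below as assumptions:
#
#   p = Parallel(bld, Options.options.jobs)
#   p.start()                  # blocks until all runnable tasks are consumed
#   if p.error:
#       raise Utils.WafError('build failed')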
| gpl-3.0 | -3,519,267,322,092,082,000 | 5,038,965,763,387,342,000 | 22.638298 | 89 | 0.676508 | false |
erinspace/osf.io | api/wb/serializers.py | 1 | 2711 | from django.db import IntegrityError
from rest_framework import serializers as ser
from rest_framework import exceptions
from website.files import exceptions as file_exceptions
from api.base.serializers import IDField, ShowIfVersion
class DestinationSerializer(ser.Serializer):
parent = ser.CharField(write_only=True)
target = ser.CharField(write_only=True)
name = ser.CharField(write_only=True, allow_blank=True, allow_null=True)
node = ShowIfVersion(
ser.CharField(write_only=True),
min_version='2.0', max_version='2.7'
)
class WaterbutlerMetadataSerializer(ser.Serializer):
source = ser.CharField(write_only=True)
destination = DestinationSerializer(write_only=True)
id = IDField(source='_id', read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True, help_text='Display name used in the general user interface')
created = ser.CharField(read_only=True)
modified = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
checkout = ser.SerializerMethodField(read_only=True)
version = ser.IntegerField(help_text='Latest file version', read_only=True, source='current_version_number')
downloads = ser.SerializerMethodField()
sha256 = ser.SerializerMethodField()
md5 = ser.SerializerMethodField()
size = ser.SerializerMethodField()
def get_checkout(self, obj):
return obj.checkout._id if obj.checkout else None
def get_downloads(self, obj):
return obj.get_download_count()
def get_sha256(self, obj):
return obj.versions.first().metadata.get('sha256', None) if obj.versions.exists() else None
def get_md5(self, obj):
return obj.versions.first().metadata.get('md5', None) if obj.versions.exists() else None
def get_size(self, obj):
if obj.versions.exists():
self.size = obj.versions.first().size
return self.size
return None
def create(self, validated_data):
source = validated_data.pop('source')
destination = validated_data.pop('destination')
name = validated_data.pop('name')
try:
return self.context['view'].perform_file_action(source, destination, name)
except IntegrityError:
raise exceptions.ValidationError('File already exists with this name.')
except file_exceptions.FileNodeCheckedOutError:
raise exceptions.ValidationError('Cannot move file as it is checked out.')
except file_exceptions.FileNodeIsPrimaryFile:
raise exceptions.ValidationError('Cannot move file as it is the primary file of preprint.')
class Meta:
type_ = 'file_metadata'
| apache-2.0 | 445,132,904,214,480,260 | -6,925,423,207,758,082,000 | 37.183099 | 112 | 0.695315 | false |
amghost/myblog | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/latex.py | 96 | 13931 | # -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
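# Example (added for illustration; not part of the original module):
#
#   >>> escape_tex(r'50% of $x_i$', 'PY')
#   '50\\PYZpc{} of \\PYZdl{}x\\PYZus{}i\\PYZdl{}'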
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
*New in Pygments 0.7.*
*New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.
`texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``). *New in Pygments 1.2.*
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``). *New in Pygments 1.2.*
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
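        # Worked example (illustrative, not in the original source):
        #   rgbcolor('FF8000') -> '1.00,0.50,0.00'
        # i.e. each hex byte is scaled to a 0..1 float for \textcolor[rgb]{...}.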
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
                cmndef += r'\let\$$@ff=\texttt'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in self.cmd2def.items():
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(r'\begin{Verbatim}[commandchars=\\\{\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(',numbers=left' +
(start and ',firstnumber=%d' % start or '') +
(step and ',stepnumber=%d' % step or ''))
if self.mathescape or self.texcomments:
outfile.write(r',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
if self.verboptions:
outfile.write(',' + self.verboptions)
outfile.write(']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in range(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
else:
value = escape_tex(value, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write('\\end{Verbatim}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
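# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes the public Pygments entry points ``highlight`` and
# ``PythonLexer`` and shows how the options documented in the class docstring
# are typically combined.
if __name__ == '__main__':  # pragma: no cover - demonstration only
    from pygments import highlight
    from pygments.lexers import PythonLexer
    formatter = LatexFormatter(linenos=True, commandprefix='PY')
    # \def commands for the \PY macros, to paste into a LaTeX preamble:
    print(formatter.get_style_defs())
    # A Verbatim environment containing the highlighted code:
    print(highlight("def foo(bar):\n    pass\n", PythonLexer(), formatter))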
| mit | 483,209,968,109,513,600 | 7,171,186,746,373,933,000 | 35.854497 | 80 | 0.543034 | false |
ashvina/heron | heron/tools/tracker/src/python/handlers/runtimestatehandler.py | 4 | 4623 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' runtimestatehandler.py '''
import traceback
import tornado.gen
import tornado.httpclient
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.proto import tmaster_pb2
from heron.tools.tracker.src.python.handlers import BaseHandler
# pylint: disable=attribute-defined-outside-init
class RuntimeStateHandler(BaseHandler):
"""
URL - /topologies/runtimestate
Parameters:
- cluster (required)
- environ (required)
- role - (optional) Role used to submit the topology.
- topology (required) name of the requested topology
The response JSON is a dictionary with all the
runtime information of a topology. Static properties
  are available from /topologies/metadata.
Example JSON response:
{
has_tmaster_location: true,
stmgrs_reg_summary: {
registered_stmgrs: [
"stmgr-1",
"stmgr-2"
],
absent_stmgrs: [ ]
},
has_scheduler_location: true,
has_physical_plan: true
}
"""
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
# pylint: disable=dangerous-default-value, no-self-use, unused-argument
@tornado.gen.coroutine
def getStmgrsRegSummary(self, tmaster, callback=None):
"""
    Get the stream manager registration summary from the tmaster
"""
if not tmaster or not tmaster.host or not tmaster.stats_port:
return
reg_request = tmaster_pb2.StmgrsRegistrationSummaryRequest()
request_str = reg_request.SerializeToString()
port = str(tmaster.stats_port)
host = tmaster.host
url = "http://{0}:{1}/stmgrsregistrationsummary".format(host, port)
request = tornado.httpclient.HTTPRequest(url,
body=request_str,
method='POST',
request_timeout=5)
Log.debug('Making HTTP call to fetch stmgrsregistrationsummary url: %s', url)
try:
client = tornado.httpclient.AsyncHTTPClient()
result = yield client.fetch(request)
Log.debug("HTTP call complete.")
except tornado.httpclient.HTTPError as e:
raise Exception(str(e))
# Check the response code - error if it is in 400s or 500s
responseCode = result.code
if responseCode >= 400:
      message = "Error in getting stream manager registration summary from Tmaster, code: " + str(responseCode)
Log.error(message)
raise tornado.gen.Return({
"message": message
})
# Parse the response from tmaster.
reg_response = tmaster_pb2.StmgrsRegistrationSummaryResponse()
reg_response.ParseFromString(result.body)
# Send response
ret = {}
for stmgr in reg_response.registered_stmgrs:
ret[stmgr] = True
for stmgr in reg_response.absent_stmgrs:
ret[stmgr] = False
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
runtime_state = topology_info["runtime_state"]
runtime_state["topology_version"] = topology_info["metadata"]["release_version"]
topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
cluster, role, environ, topology_name)
reg_summary = yield tornado.gen.Task(self.getStmgrsRegSummary, topology.tmaster)
for stmgr, reg in reg_summary.items():
runtime_state["stmgrs"].setdefault(stmgr, {})["is_registered"] = reg
self.write_success_response(runtime_state)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
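# Illustrative request (added; host, cluster and topology names are hypothetical):
# the handler above is mounted by the tracker web app and reached as
#   GET /topologies/runtimestate?cluster=local&environ=default&topology=WordCountTopology
# returning the runtime-state JSON shown in the class docstring.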
| apache-2.0 | -5,735,060,558,975,183,000 | 7,989,124,056,011,594,000 | 36.282258 | 89 | 0.677698 | false |
JohnOmernik/pimeup | throne/throne.py | 1 | 11168 | #!/usr/bin/python
#Gateway
import time
import random
import sys
import cwiid
import json
import gevent
from collections import OrderedDict
import cStringIO
import alsaaudio
import wave
import requests
import os
import struct
import math
from dotstar import Adafruit_DotStar
import socket
WHATAMI = os.path.basename(__file__).replace(".py", "")
WHOAMI = socket.gethostname()
m = alsaaudio.Mixer('PCM')
current_volume = m.getvolume() # Get the current Volume
print("Cur Vol: %s " % current_volume)
m.setvolume(100) # Set the volume to 100%.
current_volume = m.getvolume() # Get the current Volume
print("Cur Vol: %s " % current_volume)
mesg = False
rpt_mode = 0
wiimote = None
connected = False
rumble = 0
numpixels = 264 # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24
fire_colors = [ "#001100", "#005500", "#00FF00", "#33FFFF", "#FFFFFF" ]
outtimes = {}
mydelays = [0.001]
#, 0.02, 0.03, 0.1, 0.15]
heat = []
for x in range(numpixels):
heat.append(30)
COOLING = 15
num_colors = 100
my_colors = []
colors_dict = OrderedDict()
allcolors = []
fireplacestarttime = 0
soundstarttime = 0
curplay = 66
lasthb = 0
hbinterval = 30
fireplace = True
fireplacestart = False
soundstart = False
soundplaying = False
#Setting color to: 0xFF0000 # Green
#Setting color to: 0xCC00CC # Bright Teal
#Setting color to: 0x66CC00 # Orange
#Setting color to: 0x33FFFF # Magenta
#Setting color to: 0xFF00 # Red
#Setting color to: 0x330099 # Lightish Blue
#Setting color to: 0xFFFF00 # YEllow
#Setting color to: 0xFF # Bright Blue
#Setting color to: 0xFF9900 # YEllower Gren
#Setting color to: 0x33 # Dark BLue
strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.setBrightness(255)
strip.begin() # Initialize pins for output
def main():
global strip
global allcolors
global firecolors
logevent("startup", "startup", "Just started and ready to run")
for x in range(len(fire_colors)):
if x == len(fire_colors) -1:
pass
else:
print("Adding gradient for %s (%s) to %s (%s) with %s colors" % (fire_colors[x], hex_to_RGB(fire_colors[x]), fire_colors[x+1], hex_to_RGB(fire_colors[x+1]), num_colors))
gtmp = linear_gradient(fire_colors[x], fire_colors[x+1], num_colors)
my_colors.append(gtmp['hex'])
colors_dict[fire_colors[x] + "_2_" + fire_colors[x+1]] = gtmp['hex']
for x in colors_dict:
for y in colors_dict[x]:
# print("Color: %s" % hex_to_RGB(y))
allcolors.append(y)
#Connect to address given on command-line, if present
print 'Put Wiimote in discoverable mode now (press 1+2)...'
global wiimote
global rpt_mode
global connected
global rumble
print("Trying Connection")
print ("Press 1+2")
while not connected:
try:
wiimote = cwiid.Wiimote()
print("Connected!")
connected = True
rumble ^= 1
wiimote.rumble = rumble
time.sleep(2)
rumble ^= 1
wiimote.rumble = rumble
logevent("wii", "connect", "Wii remote just synced up")
except:
print("Trying Again, please press 1+2")
time.sleep(2)
wiimote.mesg_callback = callback
print("For LED we enable Button")
rpt_mode ^= cwiid.RPT_BTN
# Enable the messages in callback
wiimote.enable(cwiid.FLAG_MESG_IFC);
wiimote.rpt_mode = rpt_mode
gevent.joinall([
gevent.spawn(normal),
gevent.spawn(FirePlace),
gevent.spawn(playSound),
])
def logevent(etype, edata, edesc):
global WHOAMI
global WHATAMI
curtime = int(time.time())
curts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(curtime))
outrec = OrderedDict()
outrec['ts'] = curts
outrec['host'] = WHOAMI
outrec['script'] = WHATAMI
outrec['event_type'] = etype
outrec['event_data'] = edata
outrec['event_desc'] = edesc
sendlog(outrec, False)
outrec = None
def normal():
global strip
global lasthb
global hbinterval
global soundstart
global curplay
global fireplacestart
global fireplacestarttime
global soundstarttime
global heat
global outtimes
global soundplaying
try:
while True:
curtime = int(time.time())
if curtime - lasthb > hbinterval:
logevent("heartbeat", wiimote.state['battery'], "wii HB")
lasthb = curtime
gevent.sleep(0.001)
except KeyboardInterrupt:
print("Exiting")
setAllLEDS(strip, [0x000000])
strip.setBrightness(0)
strip.show()
sys.exit()
def playSound():
global soundstart
global fireplacestart
global soundplaying
sounds = [0, 0, 0]
channels = 2
rate = 44100
size = 1024
out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
out_stream.setchannels(channels)
out_stream.setrate(rate)
out_stream.setperiodsize(size)
soundreset = False
soundfiles = ['/home/pi/tool_mantra.wav']
memsound = {}
print("Loading Sound files to memory")
for sf in soundfiles:
f = open(sf, "rb")
sfdata = f.read()
f.close()
memsound[sf] = cStringIO.StringIO(sfdata)
while True:
if soundstart == True:
if soundreset == False:
curfile = random.choice(soundfiles)
memsound[curfile].seek(0)
soundreset = True
soundstart = False
soundplaying = True
fireplacestart = True
data = memsound[curfile].read(size)
while data:
out_stream.write(data)
data = memsound[curfile].read(size)
gevent.sleep(0.001)
soundreset = False
soundplaying = False
else:
soundplaying = False
gevent.sleep(0.001)
def FirePlace():
global numpixels
global COOLING
global strip
global allcolors
global heat
global fireplacestart
global fireplace
    # Every cycle there will be some random cooling
    # Consider adding a degree of randomness to whether a pixel cools
try:
while True:
#If we see start then reset all to 255
if fireplacestart == True:
for i in range(numpixels):
heat[i] = 255
fireplacestart = False
if fireplace == True:
for i in range(numpixels):
if random.randint(0, 255) < COOLING:
tval = heat[i] - random.randint(0, ((COOLING * 10) / numpixels) + 2)
heat[i] = tval
gevent.sleep(random.choice(mydelays))
# This is supposed to be a diffusing effect I think
# k = numpixels -3
# while k > 2:
# if random.randint(0, 255) * 2 < COOLING:
# tval = (heat[k-1] + heat[ k- 2 ] + heat[ k- 2] ) / 3
# heat[k] = tval
# k = k - 1
# gevent.sleep(random.choice(mydelays))
# Now, actually set the pixels based on a scaled representation of all pixels
for j in range(numpixels):
if heat[j] > 255:
heat[j] = 255
if heat[j] < 0:
heat[j] = 0
newcolor = int((heat[j] * len(allcolors)) / 256)
strip.setPixelColor(j, int(allcolors[newcolor].replace("#", ''), 16))
gevent.sleep(random.choice(mydelays))
strip.show()
gevent.sleep(random.choice(mydelays))
else:
gevent.sleep(0.001)
except KeyboardInterrupt:
print("")
print("exiting and shutting down strip")
setAllLEDS(strip, [0x000000])
sys.exit(0)
def sendlog(log, debug):
logurl = "http://hauntcontrol:5050/hauntlogs"
try:
r = requests.post(logurl, json=log)
if debug:
print("Posted to %s status code %s" % (logurl, r.status_code))
print(json.dumps(log))
except:
if debug:
print("Post to %s failed timed out?" % logurl)
print(json.dumps(log))
def setAllLEDS(strip, colorlist):
for x in range(numpixels):
strip.setPixelColor(x, colorlist[0])
strip.show()
def rms(frame):
SHORT_NORMALIZE = (1.0/32768.0)
CHUNK = 1024
swidth = 2
count = len(frame)/swidth
format = "%dh"%(count)
shorts = struct.unpack( format, frame )
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n*n
rms = math.pow(sum_squares/count,0.5);
return rms * 10000
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
inlcuding the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initilize a list of the output colors with the starting color
RGB_list = [s]
# Calcuate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [ int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
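# Worked example (illustrative, not part of the original script):
#   linear_gradient("#000000", "#FFFFFF", 3)['hex']
#   -> ['#000000', '#7f7f7f', '#ffffff']
# i.e. n evenly spaced colors between the endpoints, which is how the fire
# palette is built from fire_colors in main().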
def handle_buttons(buttons):
global heat
global strip
global soundstart
global soundplaying
if (buttons & cwiid.BTN_A):
print("soundplaying in A: %s" % soundplaying)
if soundplaying == False:
soundstart = True
logevent("index_change", "reset", "Reset the index to start loop again")
gevent.sleep(0.001)
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def callback(mesg_list, time):
for mesg in mesg_list:
if mesg[0] == cwiid.MESG_BTN:
handle_buttons(mesg[1])
else:
print 'Unknown Report'
if __name__ == "__main__":
main()
| apache-2.0 | -2,151,151,277,523,076,400 | 5,258,994,041,845,146,000 | 27.56266 | 182 | 0.588378 | false |
andresriancho/w3af-webui | src/w3af_webui/migrations/0006_auto__add_field_scan_show_report_time.py | 1 | 10565 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Scan.show_report_time'
db.add_column(u'scans', 'show_report_time', self.gf('django.db.models.fields.DateTimeField')(null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Scan.show_report_time'
db.delete_column(u'scans', 'show_report_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'w3af_webui.profile': {
'Meta': {'object_name': 'Profile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang_ui': ('django.db.models.fields.CharField', [], {'default': "'ru'", 'max_length': '4'}),
'list_per_page': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}),
'notification': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'w3af_webui.profilestargets': {
'Meta': {'object_name': 'ProfilesTargets', 'db_table': "u'profiles_targets'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scan_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanProfile']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.Target']", 'blank': 'True'})
},
'w3af_webui.profilestasks': {
'Meta': {'object_name': 'ProfilesTasks', 'db_table': "u'profiles_tasks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scan_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanProfile']"}),
'scan_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanTask']", 'blank': 'True'})
},
'w3af_webui.scan': {
'Meta': {'object_name': 'Scan', 'db_table': "u'scans'"},
'data': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1991, 1, 1, 0, 0)', 'null': 'True'}),
'pid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'null': 'True'}),
'result_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'null': 'True'}),
'scan_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanTask']"}),
'show_report_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 26, 19, 7, 15, 663038)'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'w3af_webui.scanprofile': {
'Meta': {'object_name': 'ScanProfile', 'db_table': "u'scan_profiles'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '240'}),
'short_comment': ('django.db.models.fields.CharField', [], {'max_length': '240', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'w3af_profile': ('django.db.models.fields.TextField', [], {'default': "'\\n'", 'blank': 'True'})
},
'w3af_webui.scantask': {
'Meta': {'object_name': 'ScanTask', 'db_table': "u'scan_tasks'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'cron': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'repeat_at': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'repeat_each': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'repeat_each_day': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'repeat_each_weekday': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.Target']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'w3af_webui.target': {
'Meta': {'object_name': 'Target', 'db_table': "u'targets'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_scan': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '240'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'w3af_webui.vulnerability': {
'Meta': {'object_name': 'Vulnerability', 'db_table': "u'vulnerabilities'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'http_transaction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.Scan']"}),
'severity': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'vuln_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.VulnerabilityType']", 'null': 'True'})
},
'w3af_webui.vulnerabilitytype': {
'Meta': {'object_name': 'VulnerabilityType', 'db_table': "u'vulnerability_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
}
}
complete_apps = ['w3af_webui']
| gpl-2.0 | -367,172,958,301,439,300 | -1,539,995,333,191,703,600 | 75.007194 | 182 | 0.547468 | false |
Microsoft/ChakraCore | test/native-tests/test-python/helloWorld.py | 3 | 2388 | #-------------------------------------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
#-------------------------------------------------------------------------------------------------------
import sys
import os.path
from ctypes import *
if sys.platform == "darwin":
platform = "dylib"
else:
platform = "so"
build_type = sys.argv[1]
if len(sys.argv) > 2 and sys.argv[2] != None:
so_path = sys.argv[2]
else:
so_path = "../../../out/" + build_type + "/libChakraCore." + platform
if os.path.isfile(so_path):
chakraCore = CDLL(so_path)
else:
print platform + " file not found. It must be a static library"
sys.exit(0) # static build
script = create_string_buffer("(()=>{return \'Hello world!\';})()")
fileName = "sample.js"
runtime = c_void_p()
# Create Javascript Runtime.
chakraCore.JsCreateRuntime(0, 0, byref(runtime));
context = c_void_p()
# Create an execution context.
chakraCore.JsCreateContext(runtime, byref(context));
# Now set the current execution context.
chakraCore.JsSetCurrentContext(context);
fname = c_void_p();
# create JsValueRef from filename
chakraCore.JsCreateString(fileName, len(fileName), byref(fname));
scriptSource = c_void_p();
# Create ArrayBuffer from script source
chakraCore.JsCreateExternalArrayBuffer(script, len(script), 0, 0, byref(scriptSource));
jsResult = c_void_p();
# Run the script.
chakraCore.JsRun(scriptSource, 0, fname, 0, byref(jsResult));
# Convert script result to String in JavaScript; redundant if script returns a String
resultJSString = c_void_p()
chakraCore.JsConvertValueToString(jsResult, byref(resultJSString));
stringLength = c_size_t();
# Get buffer size needed for the result string
chakraCore.JsCopyString(resultJSString, 0, 0,byref(stringLength));
resultSTR = create_string_buffer(stringLength.value + 1); # buffer is big enough to store the result
# Get String from JsValueRef
chakraCore.JsCopyString(resultJSString, byref(resultSTR), stringLength.value, 0);
# Ensure the result string is null-terminated (last byte of the buffer)
resultSTR[stringLength.value] = '\0';
print("Result from ChakraCore: ", resultSTR.value);
# Dispose runtime
chakraCore.JsDisposeRuntime(runtime);
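# Example invocations (illustrative paths; args are build type and optional library path):
#   python helloWorld.py Debug
#   python helloWorld.py Release /path/to/libChakraCore.so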
| mit | 1,591,115,260,834,640,400 | -9,030,117,347,216,591,000 | 31.712329 | 104 | 0.671273 | false |
edxnercel/edx-platform | common/lib/xmodule/xmodule/exceptions.py | 171 | 1339 | class InvalidDefinitionError(Exception):
pass
class NotFoundError(Exception):
pass
class ProcessingError(Exception):
'''
An error occurred while processing a request to the XModule.
For example: if an exception occurs while checking a capa problem.
'''
pass
class InvalidVersionError(Exception):
"""
Tried to save an item with a location that a store cannot support (e.g., draft version
for a non-leaf node)
"""
def __init__(self, location):
super(InvalidVersionError, self).__init__()
self.location = location
class SerializationError(Exception):
"""
Thrown when a module cannot be exported to XML
"""
def __init__(self, location, msg):
super(SerializationError, self).__init__(msg)
self.location = location
class UndefinedContext(Exception):
"""
Tried to access an xmodule field which needs a different context (runtime) to have a value.
"""
pass
class HeartbeatFailure(Exception):
"""
Raised when heartbeat fails.
"""
def __unicode__(self, *args, **kwargs):
return self.message
def __init__(self, msg, service):
"""
In addition to a msg, provide the name of the service.
"""
self.service = service
super(HeartbeatFailure, self).__init__(msg)
| agpl-3.0 | 6,697,200,057,977,861,000 | 265,664,135,480,958,940 | 22.910714 | 95 | 0.638536 | false |
yograterol/django | tests/template_tests/filter_tests/test_length_is.py | 360 | 3204 | from django.template.defaultfilters import length_is
from django.test import SimpleTestCase
from ..utils import setup
class LengthIsTests(SimpleTestCase):
@setup({'length_is01': '{% if some_list|length_is:"4" %}Four{% endif %}'})
def test_length_is01(self):
output = self.engine.render_to_string('length_is01', {'some_list': ['4', None, True, {}]})
self.assertEqual(output, 'Four')
@setup({'length_is02': '{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is02(self):
output = self.engine.render_to_string('length_is02', {'some_list': ['4', None, True, {}, 17]})
self.assertEqual(output, 'Not Four')
@setup({'length_is03': '{% if mystring|length_is:"4" %}Four{% endif %}'})
def test_length_is03(self):
output = self.engine.render_to_string('length_is03', {'mystring': 'word'})
self.assertEqual(output, 'Four')
@setup({'length_is04': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is04(self):
output = self.engine.render_to_string('length_is04', {'mystring': 'Python'})
self.assertEqual(output, 'Not Four')
@setup({'length_is05': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is05(self):
output = self.engine.render_to_string('length_is05', {'mystring': ''})
self.assertEqual(output, 'Not Four')
@setup({'length_is06': '{% with var|length as my_length %}{{ my_length }}{% endwith %}'})
def test_length_is06(self):
output = self.engine.render_to_string('length_is06', {'var': 'django'})
self.assertEqual(output, '6')
# Boolean return value from length_is should not be coerced to a string
@setup({'length_is07': '{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}'})
def test_length_is07(self):
output = self.engine.render_to_string('length_is07', {})
self.assertEqual(output, 'Length not 0')
@setup({'length_is08': '{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}'})
def test_length_is08(self):
output = self.engine.render_to_string('length_is08', {})
self.assertEqual(output, 'Length is 1')
# Invalid uses that should fail silently.
@setup({'length_is09': '{{ var|length_is:"fish" }}'})
def test_length_is09(self):
output = self.engine.render_to_string('length_is09', {'var': 'django'})
self.assertEqual(output, '')
@setup({'length_is10': '{{ int|length_is:"1" }}'})
def test_length_is10(self):
output = self.engine.render_to_string('length_is10', {'int': 7})
self.assertEqual(output, '')
@setup({'length_is11': '{{ none|length_is:"1" }}'})
def test_length_is11(self):
output = self.engine.render_to_string('length_is11', {'none': None})
self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
def test_empty_list(self):
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
def test_string(self):
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is('a', 10), False)
| bsd-3-clause | -6,053,409,603,038,627,000 | 3,674,133,825,676,909,000 | 41.72 | 102 | 0.610175 | false |
MebiusHKU/flask-web | flask/lib/python2.7/site-packages/sqlalchemy/sql/annotation.py | 60 | 6136 | # sql/annotation.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`.Annotated` class and related routines; creates hash-equivalent
copies of SQL constructs which contain context-specific markers and
associations.
"""
from .. import util
from . import operators
class Annotated(object):
"""clones a ClauseElement and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__cmp__() of the original element so that it takes its place
in hashed collections.
A reference to the original element is maintained, for the important
reason of keeping its hash value current. When GC'ed, the
hash value may be reused, causing conflicts.
"""
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = _new_annotation_type(element.__class__, cls)
return object.__new__(cls)
def __init__(self, element, values):
self.__dict__ = element.__dict__.copy()
self.__element = element
self._annotations = values
self._hash = hash(element)
def _annotate(self, values):
_values = self._annotations.copy()
_values.update(values)
return self._with_annotations(_values)
def _with_annotations(self, values):
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone._annotations = values
return clone
def _deannotate(self, values=None, clone=True):
if values is None:
return self.__element
else:
_values = self._annotations.copy()
for v in values:
_values.pop(v, None)
return self._with_annotations(_values)
def _compiler_dispatch(self, visitor, **kw):
return self.__element.__class__._compiler_dispatch(
self, visitor, **kw)
@property
def _constructor(self):
return self.__element._constructor
def _clone(self):
clone = self.__element._clone()
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return self.__class__(clone, self._annotations)
def __hash__(self):
return self._hash
def __eq__(self, other):
if isinstance(self.__element, operators.ColumnOperators):
return self.__element.__class__.__eq__(self, other)
else:
return hash(other) == hash(self)
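# Illustrative sketch (added; not part of the original module). Assuming the
# public Column construct, annotating yields a hash-equivalent copy carrying
# extra key/value metadata without mutating the original:
#   from sqlalchemy import Column, Integer
#   col = Column('id', Integer)
#   annotated = col._annotate({'some_marker': True})
#   assert hash(annotated) == hash(col)
#   assert annotated._annotations['some_marker'] is True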
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
annotated_classes = {}
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element
with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
def clone(elem):
if exclude and \
hasattr(elem, 'proxy_set') and \
elem.proxy_set.intersection(exclude):
newelem = elem._clone()
elif annotations != elem._annotations:
newelem = elem._annotate(annotations)
else:
newelem = elem
newelem._copy_internals(clone=clone)
return newelem
if element is not None:
element = clone(element)
return element
def _deep_deannotate(element, values=None):
"""Deep copy the given element, removing annotations."""
cloned = util.column_dict()
def clone(elem):
# if a values dict is given,
# the elem must be cloned each time it appears,
# as there may be different annotations in source
# elements that are remaining. if totally
# removing all annotations, can assume the same
# slate...
if values or elem not in cloned:
newelem = elem._deannotate(values=values, clone=True)
newelem._copy_internals(clone=clone)
if not values:
cloned[elem] = newelem
return newelem
else:
return cloned[elem]
if element is not None:
element = clone(element)
return element
def _shallow_annotate(element, annotations):
"""Annotate the given ClauseElement and copy its internals so that
internal objects refer to the new annotated object.
Basically used to apply a "dont traverse" annotation to a
selectable, without digging throughout the whole
structure wasting time.
"""
element = element._annotate(annotations)
element._copy_internals()
return element
def _new_annotation_type(cls, base_cls):
if issubclass(cls, Annotated):
return cls
elif cls in annotated_classes:
return annotated_classes[cls]
for super_ in cls.__mro__:
# check if an Annotated subclass more specific than
# the given base_cls is already registered, such
# as AnnotatedColumnElement.
if super_ in annotated_classes:
base_cls = annotated_classes[super_]
break
annotated_classes[cls] = anno_cls = type(
"Annotated%s" % cls.__name__,
(base_cls, cls), {})
globals()["Annotated%s" % cls.__name__] = anno_cls
return anno_cls
def _prepare_annotations(target_hierarchy, base_cls):
stack = [target_hierarchy]
while stack:
cls = stack.pop()
stack.extend(cls.__subclasses__())
_new_annotation_type(cls, base_cls)
| bsd-3-clause | -642,432,035,490,665,700 | 3,218,632,842,990,948,400 | 30.306122 | 78 | 0.613592 | false |
jakevdp/networkx | examples/algorithms/davis_club.py | 44 | 1064 | #!/usr/bin/env python
"""
Davis Southern Club Women
Shows how to make unipartite projections of the graph and compute the
properties of those graphs.
These data were collected by Davis et al. in the 1930s.
They represent observed attendance at 14 social events by 18 Southern women.
The graph is bipartite (clubs, women).
"""
import networkx as nx
import networkx.algorithms.bipartite as bipartite
G = nx.davis_southern_women_graph()
women = G.graph['top']
clubs = G.graph['bottom']
print("Biadjacency matrix")
print(bipartite.biadjacency_matrix(G,women,clubs))
# project bipartite graph onto women nodes
W = bipartite.projected_graph(G, women)
print('')
print("#Friends, Member")
for w in women:
print('%d %s' % (W.degree(w),w))
# project bipartite graph onto women nodes keeping number of co-occurence
# the degree computed is weighted and counts the total number of shared contacts
W = bipartite.weighted_projected_graph(G, women)
print('')
print("#Friend meetings, Member")
for w in women:
print('%d %s' % (W.degree(w,weight='weight'),w))
| bsd-3-clause | 2,458,284,554,979,939,300 | 382,644,730,351,948,200 | 28.555556 | 80 | 0.738722 | false |
MadDogTechnology/kops | vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files,
# so it can process them (and we don't have to check the files in).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"someday",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
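# Example invocations (illustrative):
#   python test.py testdata /path/to/golden/dir
#   python test.py rpc-server 9090 10
#   python test.py rpc-client-python-service 9090
#   python test.py rpc-client-go-service 9090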
| apache-2.0 | 7,797,832,598,641,764,000 | -8,792,288,516,518,867,000 | 30.896825 | 96 | 0.569545 | false |
KenkoGeek/2book | tobook/tobook/settings.py | 1 | 3747 | """
Django settings for tobook project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qaa1vsyq9*z-d350cjb@k8&4()*3t)%6_bj-vz4=tq1hp=0hh3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'material.admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'material.theme.cyan',
'material',
'places',
'object2book',
'booking',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tobook.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tobook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'to-book',
'USER': 'tobook',
'PASSWORD': '123456',
'HOST': '172.17.0.2',
'PORT': '',
}
}
# Email smtp configurations
"""Declare enviroment variables first to set this"""
EMAIL_HOST = os.environ.get('SMTP_HOST')
EMAIL_PORT = os.environ.get('SMTP_PORT')
EMAIL_HOST_USER = os.environ.get('SMTP_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('SMTP_HOST_PASSWD')
FROM_EMAIL = os.environ.get('SMTP_FROM_ADDR')
EMAIL_USE_TLS = True
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
PLACES_MAPS_API_KEY='AIzaSyAVDsYNHfwpeiumJO30Kghw0RjMGwMObT8'
MAP_WIDGET_HEIGHT=480
MAP_OPTIONS={}
MARKER_OPTIONS={}
| mit | 5,107,069,764,526,645,000 | 5,975,593,582,368,444,000 | 25.202797 | 91 | 0.681879 | false |
ekasitk/sahara | sahara/conductor/api.py | 1 | 17877 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles all requests to the conductor service."""
from oslo_config import cfg
from oslo_log import log as logging
from sahara.conductor import manager
from sahara.conductor import resource as r
conductor_opts = [
cfg.BoolOpt('use_local',
default=True,
help='Perform sahara-conductor operations locally.'),
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
def _get_id(obj):
"""Return object id.
Allows usage of both an object or an object's ID as a parameter when
dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
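# Illustrative note (added): because of _get_id, callers may pass either the
# resource object or its id, e.g. the following are equivalent:
#   conductor.cluster_get(ctx, cluster)
#   conductor.cluster_get(ctx, cluster.id)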
class LocalApi(object):
"""A local version of the conductor API.
It does database updates locally instead of via RPC.
"""
def __init__(self):
self._manager = manager.ConductorManager()
# Cluster ops
@r.wrap(r.ClusterResource)
def cluster_get(self, context, cluster, show_progress=False):
"""Return the cluster or None if it does not exist."""
return self._manager.cluster_get(
context, _get_id(cluster), show_progress)
@r.wrap(r.ClusterResource)
def cluster_get_all(self, context, **kwargs):
"""Get all clusters filtered by **kwargs.
e.g. cluster_get_all(plugin_name='vanilla', hadoop_version='1.1')
"""
return self._manager.cluster_get_all(context, **kwargs)
@r.wrap(r.ClusterResource)
def cluster_create(self, context, values):
"""Create a cluster from the values dictionary.
:returns: the created cluster.
"""
return self._manager.cluster_create(context, values)
@r.wrap(r.ClusterResource)
def cluster_update(self, context, cluster, values):
"""Update the cluster with the given values dictionary.
:returns: the updated cluster.
"""
return self._manager.cluster_update(context, _get_id(cluster),
values)
def cluster_destroy(self, context, cluster):
"""Destroy the cluster or raise if it does not exist.
:returns: None.
"""
self._manager.cluster_destroy(context, _get_id(cluster))
# Node Group ops
def node_group_add(self, context, cluster, values):
"""Create a node group from the values dictionary.
:returns: ID of the created node group.
"""
return self._manager.node_group_add(context, _get_id(cluster), values)
def node_group_update(self, context, node_group, values):
"""Update the node group with the given values dictionary.
:returns: None.
"""
self._manager.node_group_update(context, _get_id(node_group), values)
def node_group_remove(self, context, node_group):
"""Destroy the node group or raise if it does not exist.
:returns: None.
"""
self._manager.node_group_remove(context, _get_id(node_group))
# Instance ops
def instance_add(self, context, node_group, values):
"""Create an instance from the values dictionary.
:returns: ID of the created instance.
"""
return self._manager.instance_add(context, _get_id(node_group), values)
def instance_update(self, context, instance, values):
"""Update the instance with the given values dictionary.
:returns: None.
"""
self._manager.instance_update(context, _get_id(instance), values)
def instance_remove(self, context, instance):
"""Destroy the instance or raise if it does not exist.
:returns: None.
"""
self._manager.instance_remove(context, _get_id(instance))
# Volumes ops
def append_volume(self, context, instance, volume_id):
"""Append volume_id to instance."""
self._manager.append_volume(context, _get_id(instance), volume_id)
def remove_volume(self, context, instance, volume_id):
"""Remove volume_id in instance."""
self._manager.remove_volume(context, _get_id(instance), volume_id)
# Cluster Template ops
@r.wrap(r.ClusterTemplateResource)
def cluster_template_get(self, context, cluster_template):
"""Return the cluster template or None if it does not exist."""
return self._manager.cluster_template_get(context,
_get_id(cluster_template))
@r.wrap(r.ClusterTemplateResource)
def cluster_template_get_all(self, context, **kwargs):
"""Get all cluster templates filtered by **kwargs.
e.g. cluster_template_get_all(plugin_name='vanilla',
hadoop_version='1.1')
"""
return self._manager.cluster_template_get_all(context, **kwargs)
@r.wrap(r.ClusterTemplateResource)
def cluster_template_create(self, context, values):
"""Create a cluster template from the values dictionary.
:returns: the created cluster template
"""
return self._manager.cluster_template_create(context, values)
def cluster_template_destroy(self, context, cluster_template,
ignore_default=False):
"""Destroy the cluster template or raise if it does not exist.
:returns: None
"""
self._manager.cluster_template_destroy(context,
_get_id(cluster_template),
ignore_default)
@r.wrap(r.ClusterTemplateResource)
def cluster_template_update(self, context, id, cluster_template,
ignore_default=False):
"""Update the cluster template or raise if it does not exist.
:returns: the updated cluster template
"""
return self._manager.cluster_template_update(context,
id,
cluster_template,
ignore_default)
# Node Group Template ops
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_get(self, context, node_group_template):
"""Return the node group template or None if it does not exist."""
return self._manager.node_group_template_get(
context, _get_id(node_group_template))
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_get_all(self, context, **kwargs):
"""Get all node group templates filtered by **kwargs.
e.g. node_group_template_get_all(plugin_name='vanilla',
hadoop_version='1.1')
"""
return self._manager.node_group_template_get_all(context, **kwargs)
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_create(self, context, values):
"""Create a node group template from the values dictionary.
:returns: the created node group template
"""
return self._manager.node_group_template_create(context, values)
def node_group_template_destroy(self, context, node_group_template,
ignore_default=False):
"""Destroy the node group template or raise if it does not exist.
:returns: None
"""
self._manager.node_group_template_destroy(context,
_get_id(node_group_template),
ignore_default)
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_update(self, context, id, values,
ignore_default=False):
"""Update a node group template from the values dictionary.
:returns: the updated node group template
"""
return self._manager.node_group_template_update(context, id, values,
ignore_default)
# Data Source ops
@r.wrap(r.DataSource)
def data_source_get(self, context, data_source):
"""Return the Data Source or None if it does not exist."""
return self._manager.data_source_get(context, _get_id(data_source))
@r.wrap(r.DataSource)
def data_source_get_all(self, context, **kwargs):
"""Get all Data Sources filtered by **kwargs.
e.g. data_source_get_all(name='myfile', type='swift')
"""
return self._manager.data_source_get_all(context, **kwargs)
def data_source_count(self, context, **kwargs):
"""Count Data Sources filtered by **kwargs.
Uses sqlalchemy "in_" clause for any tuple values
Uses sqlalchemy "like" clause for any string values containing %
"""
return self._manager.data_source_count(context, **kwargs)
@r.wrap(r.DataSource)
def data_source_create(self, context, values):
"""Create a Data Source from the values dictionary."""
return self._manager.data_source_create(context, values)
def data_source_destroy(self, context, data_source):
"""Destroy the Data Source or raise if it does not exist."""
self._manager.data_source_destroy(context, _get_id(data_source))
@r.wrap(r.DataSource)
def data_source_update(self, context, id, values):
"""Update an existing Data Source"""
return self._manager.data_source_update(context, id, values)
# JobExecution ops
@r.wrap(r.JobExecution)
def job_execution_get(self, context, job_execution):
"""Return the JobExecution or None if it does not exist."""
return self._manager.job_execution_get(context,
_get_id(job_execution))
@r.wrap(r.JobExecution)
def job_execution_get_all(self, context, **kwargs):
"""Get all JobExecutions filtered by **kwargs.
kwargs key values may be the names of fields in a JobExecution
plus the following special values with the indicated meaning:
'cluster.name' -- name of the Cluster referenced by the JobExecution
'job.name' -- name of the Job referenced by the JobExecution
'status' -- JobExecution['info']['status']
e.g. job_execution_get_all(cluster_id=12, input_id=123)
job_execution_get_all(**{'cluster.name': 'test',
'job.name': 'wordcount'})
"""
return self._manager.job_execution_get_all(context, **kwargs)
def job_execution_count(self, context, **kwargs):
"""Count number of JobExecutions filtered by **kwargs.
e.g. job_execution_count(cluster_id=12, input_id=123)
"""
return self._manager.job_execution_count(context, **kwargs)
@r.wrap(r.JobExecution)
def job_execution_create(self, context, values):
"""Create a JobExecution from the values dictionary."""
return self._manager.job_execution_create(context, values)
@r.wrap(r.JobExecution)
def job_execution_update(self, context, job_execution, values):
"""Update the JobExecution or raise if it does not exist."""
return self._manager.job_execution_update(context,
_get_id(job_execution),
values)
def job_execution_destroy(self, context, job_execution):
"""Destroy the JobExecution or raise if it does not exist."""
self._manager.job_execution_destroy(context, _get_id(job_execution))
# Job ops
@r.wrap(r.Job)
def job_get(self, context, job):
"""Return the Job or None if it does not exist."""
return self._manager.job_get(context, _get_id(job))
@r.wrap(r.Job)
def job_get_all(self, context, **kwargs):
"""Get all Jobs filtered by **kwargs.
e.g. job_get_all(name='myjob', type='MapReduce')
"""
return self._manager.job_get_all(context, **kwargs)
@r.wrap(r.Job)
def job_create(self, context, values):
"""Create a Job from the values dictionary."""
return self._manager.job_create(context, values)
def job_update(self, context, job, values):
"""Update the Job or raise if it does not exist."""
return self._manager.job_update(context, _get_id(job),
values)
def job_destroy(self, context, job):
"""Destroy the Job or raise if it does not exist."""
self._manager.job_destroy(context, _get_id(job))
def job_main_name(self, context, job):
"""Return the name of the first main JobBinary or None.
At present the 'mains' element is expected to contain a single element.
In the future if 'mains' contains more than one element we will need
a scheme or convention for retrieving a name from the list of binaries.
:param job: This is expected to be a Job object
"""
if job.mains:
binary = self.job_binary_get(context, job.mains[0])
if binary is not None:
return binary["name"]
return None
def job_lib_names(self, context, job):
"""Return the name of all job lib binaries or an empty list.
:param job: This is expected to be a Job object
"""
lib_ids = job.libs or []
binaries = (self.job_binary_get(context, lib_id) for lib_id in lib_ids)
return [binary["name"] for binary in binaries if binary is not None]
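    # Example (illustrative, not in the original file): for a Job whose 'mains' holds a
    # single binary and whose 'libs' holds two, job_main_name() returns that one binary's
    # name and job_lib_names() returns the two lib names, skipping any missing binaries.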
# JobBinary ops
@r.wrap(r.JobBinary)
def job_binary_get_all(self, context, **kwargs):
"""Get all JobBinarys filtered by **kwargs.
e.g. job_binary_get_all(name='wordcount.jar')
"""
return self._manager.job_binary_get_all(context, **kwargs)
@r.wrap(r.JobBinary)
def job_binary_get(self, context, job_binary):
"""Return the JobBinary or None if it does not exist."""
return self._manager.job_binary_get(context, _get_id(job_binary))
@r.wrap(r.JobBinary)
def job_binary_create(self, context, values):
"""Create a JobBinary from the values dictionary."""
return self._manager.job_binary_create(context, values)
def job_binary_destroy(self, context, job_binary):
"""Destroy the JobBinary or raise if it does not exist."""
self._manager.job_binary_destroy(context, _get_id(job_binary))
@r.wrap(r.JobBinary)
def job_binary_update(self, context, id, values):
"""Update a JobBinary from the values dictionary."""
return self._manager.job_binary_update(context, id, values)
# JobBinaryInternal ops
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_get_all(self, context, **kwargs):
"""Get all JobBinaryInternals filtered by **kwargs.
        e.g. job_binary_internal_get_all(name='wordcount.jar')
"""
return self._manager.job_binary_internal_get_all(context, **kwargs)
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_get(self, context, job_binary_internal):
"""Return the JobBinaryInternal or None if it does not exist."""
return self._manager.job_binary_internal_get(
context,
_get_id(job_binary_internal))
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_create(self, context, values):
"""Create a JobBinaryInternal from the values dictionary."""
return self._manager.job_binary_internal_create(context, values)
def job_binary_internal_destroy(self, context, job_binary_internal_id):
"""Destroy the JobBinaryInternal or raise if it does not exist."""
self._manager.job_binary_internal_destroy(
context,
_get_id(job_binary_internal_id))
def job_binary_internal_get_raw_data(self, context,
job_binary_internal_id):
"""Return the binary data field from a JobBinaryInternal."""
return self._manager.job_binary_internal_get_raw_data(
context,
job_binary_internal_id)
# Events ops
def cluster_provision_step_add(self, context, cluster_id, values):
"""Create a provisioning step assigned to cluster from values dict."""
return self._manager.cluster_provision_step_add(
context, cluster_id, values)
def cluster_provision_step_update(self, context, provision_step):
"""Update the cluster provisioning step."""
return self._manager.cluster_provision_step_update(
context, provision_step)
def cluster_provision_progress_update(self, context, cluster_id):
"""Return cluster with provision progress updated field."""
return self._manager.cluster_provision_progress_update(
context, cluster_id)
def cluster_event_add(self, context, provision_step, values):
"""Assign new event to the specified provision step."""
return self._manager.cluster_event_add(
context, provision_step, values)
class RemoteApi(LocalApi):
"""Conductor API that does updates via RPC to the ConductorManager."""
    # TODO(slukjanov): it should override _manager and only the necessary functions
| apache-2.0 | -6,486,253,410,570,346,000 | -3,681,067,405,627,449,300 | 36.399582 | 79 | 0.618001 | false |
sdague/home-assistant | homeassistant/components/canary/config_flow.py | 9 | 3968 | """Config flow for Canary."""
import logging
from typing import Any, Dict, Optional
from canary.api import Api
from requests import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_CLOUD_POLL, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS, DEFAULT_TIMEOUT
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
def validate_input(hass: HomeAssistantType, data: dict) -> bool:
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
# constructor does login call
Api(
data[CONF_USERNAME],
data[CONF_PASSWORD],
data.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
return True
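# Illustrative usage (not part of the original module): the config flow below runs
# validate_input(hass, {CONF_USERNAME: "user@example.com", CONF_PASSWORD: "secret"})
# in an executor job and maps ConnectTimeout/HTTPError to a "cannot_connect" error.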
class CanaryConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Canary."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_CLOUD_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return CanaryOptionsFlowHandler(config_entry)
async def async_step_import(
self, user_input: Optional[ConfigType] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by configuration file."""
return await self.async_step_user(user_input)
async def async_step_user(
self, user_input: Optional[ConfigType] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
errors = {}
default_username = ""
if user_input is not None:
if CONF_TIMEOUT not in user_input:
user_input[CONF_TIMEOUT] = DEFAULT_TIMEOUT
default_username = user_input[CONF_USERNAME]
try:
await self.hass.async_add_executor_job(
validate_input, self.hass, user_input
)
except (ConnectTimeout, HTTPError):
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
return self.async_abort(reason="unknown")
else:
return self.async_create_entry(
title=user_input[CONF_USERNAME],
data=user_input,
)
data_schema = {
vol.Required(CONF_USERNAME, default=default_username): str,
vol.Required(CONF_PASSWORD): str,
}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(data_schema),
errors=errors or {},
)
class CanaryOptionsFlowHandler(OptionsFlow):
"""Handle Canary client options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input: Optional[ConfigType] = None):
"""Manage Canary options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options = {
vol.Optional(
CONF_FFMPEG_ARGUMENTS,
default=self.config_entry.options.get(
CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS
),
): str,
vol.Optional(
CONF_TIMEOUT,
default=self.config_entry.options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
): int,
}
return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
| apache-2.0 | 2,721,558,777,100,425,700 | -2,353,579,503,554,113,500 | 31.793388 | 87 | 0.613911 | false |
marcosfede/algorithms | adventofcode/2018/21/d21.py | 1 | 2031 |
def addr(r, a, b):
return r[a]+r[b]
def addi(r, a, b):
return r[a]+b
def mulr(r, a, b):
return r[a]*r[b]
def muli(r, a, b):
return r[a] * b
def banr(r, a, b):
return r[a] & r[b]
def bani(r, a, b):
return r[a] & b
def borr(r, a, b):
return r[a] | r[b]
def bori(r, a, b):
return r[a] | b
def setr(r, a, b):
return r[a]
def seti(r, a, b):
return a
def gtir(r, a, b):
return 1 if a > r[b] else 0
def grti(r, a, b):
return 1 if r[a] > b else 0
def gtrr(r, a, b):
return 1 if r[a] > r[b] else 0
def eqir(r, a, b):
return 1 if a == r[b] else 0
def eqri(r, a, b):
return 1 if r[a] == b else 0
def eqrr(r, a, b):
return 1 if r[a] == r[b] else 0
def apply_opcode(opcode, registers, a, b, c):
newregisters = registers[:]
newregisters[c] = opcode(registers, a, b)
return newregisters
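# Quick sanity check with illustrative values: addi writes r[1] + 7 into register 3 of a
# fresh copy, so the original register list is left untouched.
assert apply_opcode(addi, [0, 0, 0, 0, 0, 0], 1, 7, 3) == [0, 0, 0, 7, 0, 0]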
opcodes = [addr, addi, mulr, muli, banr, bani, borr,
bori, setr, seti, gtir, grti, gtrr, eqir, eqri, eqrr]
opcodes_by_name = {op.__name__: op for op in opcodes}
program = []
with open('./21/input.txt') as f:
pregister = int(f.readline()[4])
for line in f:
op, a, b, c = line.split(' ')
program.append((op, int(a), int(b), int(c)))
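# Input format, for reference: the first line is "#ip N" (character 4 is the digit of the
# register bound to the instruction pointer); every other line is "<opcode> A B C".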
# p1 & p2
pointer = 0
registers = [0, 0, 0, 0, 0, 0]
seen = set()
last = None
while 0 <= pointer < len(program):
registers[pregister] = pointer
op, a, b, c = program[pointer]
registers = apply_opcode(opcodes_by_name[op], registers, a, b, c)
pointer = registers[pregister]
pointer += 1
    if pointer == 28:
        if registers[5] in seen:
            # part 2: the last new value of r[5] seen before the sequence repeats
            print(last)
            break
        seen.add(registers[5])
        last = registers[5]
        print(registers[5])
print(registers[0])
###
# so basically, inspecting the assembly you realize that the only line where register 0
# is used is line 28, which sets the termination condition:
# when r[0] == r[5] the program halts.
# p1 is the value of r[5] when it first reaches line 28,
# p2 is the last value of r[5] before the sequence starts cycling indefinitely.
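# A possible refinement (sketch, not in the original solution): rather than hard-coding
# line 28, the comparison line could be located automatically, assuming it is the single
# eqrr instruction that involves register 0:
#   magic_line = next(i for i, (op, a, b, c) in enumerate(program)
#                     if op == 'eqrr' and 0 in (a, b))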
| gpl-3.0 | 2,705,365,595,145,269,000 | 4,396,395,416,761,286,000 | 17.981308 | 148 | 0.573117 | false |
espadrine/opera | chromium/src/third_party/python_26/Lib/site-packages/win32/Demos/rastest.py | 17 | 4789 | # rastest.py - test/demonstrate the win32ras module.
# Much of the code here contributed by Jethro Wright.
import sys
import string
import os
import win32ras
# Build a little dictionary of RAS states to decent strings.
# eg win32ras.RASCS_OpenPort -> "OpenPort"
stateMap = {}
for name, val in win32ras.__dict__.items():
if name[:6]=="RASCS_":
stateMap[val] = name[6:]
# Use a lock so the callback can tell the main thread when it is finished.
import win32event
callbackEvent = win32event.CreateEvent(None, 0, 0, None)
def Callback( hras, msg, state, error, exterror):
# print "Callback called with ", hras, msg, state, error, exterror
stateName = stateMap.get(state, "Unknown state?")
print "Status is %s (%04lx), error code is %d" % (stateName, state, error)
finished = state in [win32ras.RASCS_Connected]
if finished:
win32event.SetEvent(callbackEvent)
if error != 0 or int( state ) == win32ras.RASCS_Disconnected:
# we know for sure this is a good place to hangup....
print "Detected call failure: %s" % win32ras.GetErrorString( error )
HangUp( hras )
win32event.SetEvent(callbackEvent)
def ShowConnections():
print "All phone-book entries:"
for (name,) in win32ras.EnumEntries():
print " ", name
print "Current Connections:"
for con in win32ras.EnumConnections():
print " ", con
def EditEntry(entryName):
try:
win32ras.EditPhonebookEntry(0,None,entryName)
except win32ras.error, (rc, function, msg):
print "Can not edit/find the RAS entry -", msg
def HangUp( hras ):
# trap potential, irrelevant errors from win32ras....
try:
win32ras.HangUp( hras )
except:
print "Tried to hang up gracefully on error, but didn't work...."
return None
def Connect(entryName, bUseCallback):
if bUseCallback:
theCallback = Callback
win32event.ResetEvent(callbackEvent)
else:
theCallback = None
# in order to *use* the username/password of a particular dun entry, one must
# explicitly get those params under win95....
try:
dp, b = win32ras.GetEntryDialParams( None, entryName )
except:
print "Couldn't find DUN entry: %s" % entryName
else:
hras, rc = win32ras.Dial(None, None, (entryName, "", "", dp[ 3 ], dp[ 4 ], ""),theCallback)
# hras, rc = win32ras.Dial(None, None, (entryName, ),theCallback)
# print hras, rc
if not bUseCallback and rc <> 0:
print "Could not dial the RAS connection:", win32ras.GetErrorString(rc)
hras = HangUp( hras )
# don't wait here if there's no need to....
elif bUseCallback and win32event.WaitForSingleObject(callbackEvent, 60000)!=win32event.WAIT_OBJECT_0:
print "Gave up waiting for the process to complete!"
        # sdk docs state one must explicitly hangup, even if there's an error....
try:
cs = win32ras.GetConnectStatus( hras )
except:
# on error, attempt a hang up anyway....
hras = HangUp( hras )
else:
if int( cs[ 0 ] ) == win32ras.RASCS_Disconnected:
hras = HangUp( hras )
return hras, rc
def Disconnect( rasEntry ):
# Need to find the entry
name = string.lower( rasEntry )
for hcon, entryName, devName, devType in win32ras.EnumConnections():
if string.lower( entryName ) == name:
win32ras.HangUp( hcon )
print "Disconnected from", rasEntry
break
else:
print "Could not find an open connection to", entryName
usage = """
Usage: %s [-s] [-l] [-c connection] [-d connection]
-l : List phone-book entries and current connections.
-s : Show status while connecting/disconnecting (uses callbacks)
-c : Connect to the specified phonebook name.
-d : Disconnect from the specified phonebook name.
-e : Edit the specified phonebook entry.
"""
def main():
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "slc:d:e:")
except getopt.error, why:
print why
        print usage % (os.path.basename(sys.argv[0]),)
return
bCallback = 0
if args or not opts:
        print usage % (os.path.basename(sys.argv[0]),)
return
for opt, val in opts:
if opt=="-s":
bCallback = 1
if opt=="-l":
ShowConnections()
if opt=="-c":
hras, rc = Connect(val, bCallback)
if hras != None:
print "hras: 0x%8lx, rc: 0x%04x" % ( hras, rc )
if opt=="-d":
Disconnect(val)
if opt=="-e":
EditEntry(val)
if __name__=='__main__':
main()
| bsd-3-clause | -5,051,477,663,609,632,000 | -6,612,675,888,153,033,000 | 34.213235 | 109 | 0.605972 | false |
MER-GROUP/intellij-community | python/lib/Lib/site-packages/django/contrib/messages/tests/user_messages.py | 241 | 2619 | from django import http
from django.contrib.auth.models import User
from django.contrib.messages.storage.user_messages import UserMessagesStorage,\
LegacyFallbackStorage
from django.contrib.messages.tests.base import skipUnlessAuthIsInstalled
from django.contrib.messages.tests.cookie import set_cookie_data
from django.contrib.messages.tests.fallback import FallbackTest
from django.test import TestCase
class UserMessagesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='tester')
def test_add(self):
storage = UserMessagesStorage(http.HttpRequest())
self.assertRaises(NotImplementedError, storage.add, 'Test message 1')
def test_get_anonymous(self):
# Ensure that the storage still works if no user is attached to the
# request.
storage = UserMessagesStorage(http.HttpRequest())
self.assertEqual(len(storage), 0)
def test_get(self):
storage = UserMessagesStorage(http.HttpRequest())
storage.request.user = self.user
self.user.message_set.create(message='test message')
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'test message')
UserMessagesTest = skipUnlessAuthIsInstalled(UserMessagesTest)
class LegacyFallbackTest(FallbackTest, TestCase):
storage_class = LegacyFallbackStorage
def setUp(self):
super(LegacyFallbackTest, self).setUp()
self.user = User.objects.create(username='tester')
def get_request(self, *args, **kwargs):
request = super(LegacyFallbackTest, self).get_request(*args, **kwargs)
request.user = self.user
return request
def test_get_legacy_only(self):
request = self.get_request()
storage = self.storage_class(request)
self.user.message_set.create(message='user message')
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'user message')
def test_get_legacy(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
self.user.message_set.create(message='user message')
set_cookie_data(cookie_storage, ['cookie'])
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 2)
self.assertEqual(list(storage)[0].message, 'user message')
self.assertEqual(list(storage)[1], 'cookie')
LegacyFallbackTest = skipUnlessAuthIsInstalled(LegacyFallbackTest)
| apache-2.0 | 4,369,457,313,906,611,000 | 2,535,505,982,507,008,500 | 36.414286 | 79 | 0.704849 | false |
imtapps/django-imt-fork | tests/regressiontests/string_lookup/models.py | 113 | 1457 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __str__(self):
return "Foo %s" % self.name
@python_2_unicode_compatible
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, related_name='normal_foo')
fwd = models.ForeignKey("Whiz")
back = models.ForeignKey("Foo")
def __str__(self):
        return "Bar %s" % self.name
@python_2_unicode_compatible
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Whiz %s" % self.name
@python_2_unicode_compatible
class Child(models.Model):
parent = models.OneToOneField('Base')
name = models.CharField(max_length=50)
def __str__(self):
return "Child %s" % self.name
@python_2_unicode_compatible
class Base(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Base %s" % self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.IPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
| bsd-3-clause | 5,726,155,784,032,739,000 | 6,455,323,815,438,636,000 | 25.490909 | 65 | 0.669183 | false |
Phoenix1369/site | judge/models/interface.py | 1 | 3940 | import re
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.fields import TreeForeignKey
from mptt.models import MPTTModel
from judge.models.problem import Problem
from judge.models.profile import Profile
__all__ = ['MiscConfig', 'validate_regex', 'NavigationBar', 'BlogPost', 'Solution']
class MiscConfig(models.Model):
key = models.CharField(max_length=30, db_index=True)
value = models.TextField(blank=True)
def __unicode__(self):
return self.key
class Meta:
verbose_name = _('configuration item')
verbose_name_plural = _('miscellaneous configuration')
def validate_regex(regex):
try:
re.compile(regex, re.VERBOSE)
except re.error as e:
raise ValidationError('Invalid regex: %s' % e.message)
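# For illustration (not in the original file): validate_regex(r'^/problems/') passes
# silently, while validate_regex('(') raises ValidationError since re.compile rejects it.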
class NavigationBar(MPTTModel):
class Meta:
verbose_name = _('navigation item')
verbose_name_plural = _('navigation bar')
class MPTTMeta:
order_insertion_by = ['order']
order = models.PositiveIntegerField(db_index=True, verbose_name=_('order'))
key = models.CharField(max_length=10, unique=True, verbose_name=_('identifier'))
label = models.CharField(max_length=20, verbose_name=_('label'))
path = models.CharField(max_length=255, verbose_name=_('link path'))
regex = models.TextField(verbose_name=_('highlight regex'), validators=[validate_regex])
parent = TreeForeignKey('self', verbose_name=_('parent item'), null=True, blank=True, related_name='children')
def __unicode__(self):
return self.label
@property
def pattern(self, cache={}):
        # A cache with a bad policy is an alias for a memory leak
# Thankfully, there will never be too many regexes to cache.
if self.regex in cache:
return cache[self.regex]
else:
pattern = cache[self.regex] = re.compile(self.regex, re.VERBOSE)
return pattern
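    # Presumed usage elsewhere in the project (illustrative only): templates or views can
    # test item.pattern.search(request.path) to decide whether to highlight this item.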
class BlogPost(models.Model):
title = models.CharField(verbose_name=_('post title'), max_length=100)
authors = models.ManyToManyField(Profile, verbose_name=_('authors'), blank=True)
slug = models.SlugField(verbose_name=_('slug'))
visible = models.BooleanField(verbose_name=_('public visibility'), default=False)
sticky = models.BooleanField(verbose_name=_('sticky'), default=False)
publish_on = models.DateTimeField(verbose_name=_('publish after'))
content = models.TextField(verbose_name=_('post content'))
summary = models.TextField(verbose_name=_('post summary'), blank=True)
og_image = models.CharField(verbose_name=_('openGraph image'), default='', max_length=150, blank=True)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('blog_post', args=(self.id, self.slug))
class Meta:
permissions = (
('see_hidden_post', 'See hidden posts'),
)
verbose_name = _('blog post')
verbose_name_plural = _('blog posts')
class Solution(models.Model):
url = models.CharField('URL', max_length=100, db_index=True, blank=True)
title = models.CharField(max_length=200)
is_public = models.BooleanField(default=False)
publish_on = models.DateTimeField()
content = models.TextField()
authors = models.ManyToManyField(Profile, blank=True)
problem = models.ForeignKey(Problem, on_delete=models.SET_NULL, verbose_name=_('associated problem'),
null=True, blank=True)
def get_absolute_url(self):
return reverse('solution', args=[self.url])
def __unicode__(self):
return self.title
class Meta:
permissions = (
('see_private_solution', 'See hidden solutions'),
)
verbose_name = _('solution')
verbose_name_plural = _('solutions')
| agpl-3.0 | -9,011,134,475,252,437,000 | 7,310,983,049,297,379,000 | 34.818182 | 114 | 0.661421 | false |
bccp/nbodykit | nbodykit/tutorials/halos.py | 2 | 4020 | from nbodykit.source.catalog import HaloCatalog, HDFCatalog
from nbodykit import CurrentMPIComm, transform
from nbodykit.cosmology import Cosmology
class DemoHaloCatalog(HaloCatalog):
"""
Create a demo catalog of halos using one of the built-in :mod:`halotools`
catalogs.
.. note::
The first request for a particular catalog will download the data
and cache in the ``~/.astropy/cache/halotools`` directory.
Parameters
----------
simname : string
Nickname of the simulation. Currently supported simulations are
Bolshoi (simname = ``bolshoi``), Consuelo (simname = ``consuelo``),
MultiDark (simname = ``multidark``), and Bolshoi-Planck (simname = ``bolplanck``).
halo_finder : string
Nickname of the halo-finder, e.g. ``rockstar`` or ``bdm``.
redshift : float
Redshift of the requested snapshot.
Must match one of the available snapshots within ``dz_tol=0.1``,
or a prompt will be issued providing the nearest
available snapshots to choose from.
Examples
--------
>>> from nbodykit.tutorials import DemoHaloCatalog
>>> halos = DemoHaloCatalog('bolshoi', 'rockstar', 0.5)
>>> print(halos.columns)
"""
@CurrentMPIComm.enable
def __init__(self, simname, halo_finder, redshift, comm=None):
from halotools.sim_manager import CachedHaloCatalog, DownloadManager
from halotools.sim_manager.supported_sims import supported_sim_dict
        # do some setup
self.comm = comm
meta_cols = ['Lbox', 'redshift', 'particle_mass']
# try to automatically load from the Halotools cache
exception = None
if self.comm.rank == 0:
kws = {'simname':simname, 'halo_finder':halo_finder, 'redshift':redshift}
try:
cached_halos = CachedHaloCatalog(dz_tol=0.1, **kws)
fname = cached_halos.fname # the filename to load
meta = {k:getattr(cached_halos, k) for k in meta_cols}
except Exception as e:
# try to download on the root rank
try:
# download
dl = DownloadManager()
dl.download_processed_halo_table(dz_tol=0.1, **kws)
# access the cached halo catalog and get fname attribute
# NOTE: this does not read the data
cached_halos = CachedHaloCatalog(dz_tol=0.1, **kws)
fname = cached_halos.fname
meta = {k:getattr(cached_halos, k) for k in meta_cols}
except Exception as e:
exception = e
else:
fname = None
meta = None
# re-raise a download error on all ranks if it occurred
exception = self.comm.bcast(exception, root=0)
if exception is not None:
raise exception
# broadcast the file we are loading
fname = self.comm.bcast(fname, root=0)
meta = self.comm.bcast(meta, root=0)
# initialize an HDF catalog and add Position/Velocity
cat = HDFCatalog(fname, comm=comm)
cat['Position'] = transform.StackColumns(cat['halo_x'], cat['halo_y'], cat['halo_z'])
cat['Velocity'] = transform.StackColumns(cat['halo_vx'], cat['halo_vy'], cat['halo_vz'])
# get the cosmology from Halotools
cosmo = supported_sim_dict[simname]().cosmology # this is astropy cosmology
cosmo = Cosmology.from_astropy(cosmo)
# initialize the HaloCatalog
HaloCatalog.__init__(self, cat, cosmo, meta['redshift'], mdef='vir', mass='halo_mvir')
# add some meta-data
# NOTE: all Halotools catalogs have to these attributes
self.attrs['BoxSize'] = meta['Lbox']
self.attrs['redshift'] = meta['redshift']
self.attrs['particle_mass'] = meta['particle_mass']
# save the cosmology
self.cosmo = cosmo
self.attrs['cosmo'] = dict(self.cosmo)
| gpl-3.0 | 6,313,647,335,468,115,000 | -4,204,259,707,582,283,000 | 39.2 | 96 | 0.601493 | false |
stef1927/cassandra-dtest | thrift_test.py | 5 | 148047 | import re
import struct
import time
import uuid
import pytest
import logging
import codecs
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TApplicationException
from thrift.transport import TSocket, TTransport
from tools.assertions import assert_length_equal
from tools.misc import ImmutableMapping
from dtest_setup_overrides import DTestSetupOverrides
from dtest import Tester
from thrift_bindings.thrift010 import Cassandra
from thrift_bindings.thrift010.Cassandra import (CfDef, Column, ColumnDef,
ColumnOrSuperColumn, ColumnParent,
ColumnPath, ColumnSlice,
ConsistencyLevel, CounterColumn,
Deletion, IndexExpression,
IndexOperator, IndexType,
InvalidRequestException, KeyRange,
KeySlice, KsDef, MultiSliceRequest,
Mutation, NotFoundException,
SlicePredicate, SliceRange,
SuperColumn)
from tools.assertions import (assert_all, assert_none, assert_one)
MAX_TTL = 20 * 365 * 24 * 60 * 60 # 20 years in seconds
since = pytest.mark.since
logger = logging.getLogger(__name__)
utf8encoder = codecs.getencoder('utf-8')
def utf8encode(str):
return utf8encoder(str)[0]
def get_thrift_client(host='127.0.0.1', port=9160):
socket = TSocket.TSocket(host, port)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Cassandra.Client(protocol)
client.transport = transport
return client
client = None
pid_fname = "system_test.pid"
def pid():
return int(open(pid_fname).read())
@since('2.0', max_version='4')
class TestThrift(Tester):
@pytest.fixture(scope='function', autouse=True)
def fixture_dtest_setup_overrides(self, dtest_config):
dtest_setup_overrides = DTestSetupOverrides()
"""
@jira_ticket CASSANDRA-7653
"""
dtest_setup_overrides.cluster_options = ImmutableMapping(
{'partitioner': 'org.apache.cassandra.dht.ByteOrderedPartitioner',
'start_rpc': 'true'})
return dtest_setup_overrides
@pytest.fixture(scope='function', autouse=True)
def fixture_set_cluster_settings(self, fixture_dtest_setup):
fixture_dtest_setup.cluster.populate(1)
node1, = fixture_dtest_setup.cluster.nodelist()
# If vnodes are not used, we must set our own initial_token
# Because ccm will not set a hex token for ByteOrderedPartitioner
# automatically. It does not matter what token we set as we only
# ever use one node.
if not fixture_dtest_setup.dtest_config.use_vnodes:
node1.set_configuration_options(values={'initial_token': 'abcd'})
# CASSANDRA-14092 - prevent max ttl tests from failing
fixture_dtest_setup.cluster.start(jvm_args=['-Dcassandra.expiration_date_overflow_policy=CAP',
'-Dcassandra.expiration_overflow_warning_interval_minutes=0'],
wait_for_binary_proto=True)
fixture_dtest_setup.cluster.nodelist()[0].watch_log_for("Listening for thrift clients") # Wait for the thrift port to open
time.sleep(0.1)
# this is ugly, but the whole test module is written against a global client
global client
client = get_thrift_client()
client.transport.open()
self.define_schema()
yield client
client.transport.close()
def define_schema(self):
keyspace1 = Cassandra.KsDef('Keyspace1', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
cf_defs=[
Cassandra.CfDef('Keyspace1', 'Standard1'),
Cassandra.CfDef('Keyspace1', 'Standard2'),
Cassandra.CfDef('Keyspace1', 'Standard3', column_metadata=[Cassandra.ColumnDef(utf8encode('c1'), 'AsciiType'), Cassandra.ColumnDef(utf8encode('c2'), 'AsciiType')]),
Cassandra.CfDef('Keyspace1', 'Standard4', column_metadata=[Cassandra.ColumnDef(utf8encode('c1'), 'AsciiType')]),
Cassandra.CfDef('Keyspace1', 'StandardLong1', comparator_type='LongType'),
Cassandra.CfDef('Keyspace1', 'StandardInteger1', comparator_type='IntegerType'),
Cassandra.CfDef('Keyspace1', 'StandardComposite', comparator_type='CompositeType(AsciiType, AsciiType)'),
Cassandra.CfDef('Keyspace1', 'Super1', column_type='Super', subcomparator_type='LongType'),
Cassandra.CfDef('Keyspace1', 'Super2', column_type='Super', subcomparator_type='LongType'),
Cassandra.CfDef('Keyspace1', 'Super3', column_type='Super', comparator_type='LongType', subcomparator_type='UTF8Type'),
Cassandra.CfDef('Keyspace1', 'Counter1', default_validation_class='CounterColumnType'),
Cassandra.CfDef('Keyspace1', 'SuperCounter1', column_type='Super', default_validation_class='CounterColumnType'),
Cassandra.CfDef('Keyspace1', 'Indexed1', column_metadata=[Cassandra.ColumnDef(utf8encode('birthdate'), 'LongType', Cassandra.IndexType.KEYS, 'birthdate_index')]),
Cassandra.CfDef('Keyspace1', 'Indexed2', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'LongType', Cassandra.IndexType.KEYS)]),
Cassandra.CfDef('Keyspace1', 'Indexed3', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'UTF8Type', Cassandra.IndexType.KEYS)]),
Cassandra.CfDef('Keyspace1', 'Indexed4', column_metadata=[Cassandra.ColumnDef(utf8encode('a'), 'LongType', Cassandra.IndexType.KEYS, 'a_index'), Cassandra.ColumnDef(utf8encode('z'), 'UTF8Type')]),
Cassandra.CfDef('Keyspace1', 'Expiring', default_time_to_live=2),
Cassandra.CfDef('Keyspace1', 'ExpiringMaxTTL', default_time_to_live=MAX_TTL)
])
keyspace2 = Cassandra.KsDef('Keyspace2', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
cf_defs=[
Cassandra.CfDef('Keyspace2', 'Standard1'),
Cassandra.CfDef('Keyspace2', 'Standard3'),
Cassandra.CfDef('Keyspace2', 'Super3', column_type='Super', subcomparator_type='BytesType'),
Cassandra.CfDef('Keyspace2', 'Super4', column_type='Super', subcomparator_type='TimeUUIDType'), ])
for ks in [keyspace1, keyspace2]:
client.system_add_keyspace(ks)
def i64(n):
return _i64(n)
def i32(n):
return _i32(n)
def i16(n):
return _i16(n)
def composite(item1, item2=None, eoc=b'\x00'):
if isinstance(item1, str):
item1 = utf8encode(item1)
if isinstance(item2, str):
item2 = utf8encode(item2)
if isinstance(eoc, str):
eoc = utf8encode(eoc)
packed = _i16(len(item1)) + item1 + eoc
if item2 is not None:
packed += _i16(len(item2)) + item2
packed += eoc
return packed
def _i64(n):
return struct.pack('>q', n) # big endian = network order
def _i32(n):
return struct.pack('>i', n) # big endian = network order
def _i16(n):
return struct.pack('>h', n) # big endian = network order
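# Byte-level sanity checks (illustrative): values are packed big-endian, and composite()
# length-prefixes each component and terminates it with an end-of-component byte
# (b'\x00' by default).
assert _i16(1) == b'\x00\x01'
assert composite('a', 'b') == b'\x00\x01a\x00\x00\x01b\x00'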
_SIMPLE_COLUMNS = [Column(utf8encode('c1'), utf8encode('value1'), 0),
Column(utf8encode('c2'), utf8encode('value2'), 0)]
_SUPER_COLUMNS = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(5), utf8encode('value5'), 0),
Column(_i64(6), utf8encode('value6'), 0)])]
def _assert_column(column_family, key, column, value, ts=0):
if isinstance(key, str):
key = utf8encode(key)
if isinstance(value, str):
value = utf8encode(value)
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value))
def _assert_columnpath_exists(key, column_path):
if isinstance(key, str):
key = utf8encode(key)
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path))
def _assert_no_columnpath(key, column_path):
if isinstance(key, str):
key = utf8encode(key)
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def _insert_simple():
return _insert_multi([utf8encode('key1')])
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
if isinstance(key, str):
key = utf8encode(key)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c2'), utf8encode('value2'), 0), CL)
def _insert_batch():
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
client.batch_mutate({utf8encode('key1'): cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
if isinstance(key, str):
key = utf8encode(key)
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE).column == Column(utf8encode('c1'), utf8encode('value1'), 0)
L = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
if isinstance(key, str):
key = utf8encode(key)
client.insert(key, ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', utf8encode('sc2')), Column(_i64(6), utf8encode('value6'), 0), ConsistencyLevel.ONE)
def _insert_range():
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c2'), utf8encode('value2'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c3'), utf8encode('value3'), 0), ConsistencyLevel.ONE)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange(utf8encode('c1'), utf8encode('c2'), False, 1000))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == utf8encode('c1')
assert result[1].column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('c3'), utf8encode('c2'), True, 1000))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == utf8encode('c3')
assert result[1].column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 1000))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(6), utf8encode('value6'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc3')), Column(_i64(7), utf8encode('value7'), 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc2'), utf8encode('sc3'), False, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == utf8encode('sc2')
assert result[1].super_column.name == utf8encode('sc3')
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc3'), utf8encode('sc2'), True, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == utf8encode('sc3')
assert result[1].super_column.name == utf8encode('sc2')
def _verify_super(supercf='Super1', key='key1'):
if isinstance(key, str):
key = utf8encode(key)
assert client.get(key, ColumnPath(supercf, utf8encode('sc1'), _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), utf8encode('value4'), 0)
slice = [result.super_column
for result in _big_slice(key, ColumnParent('Super1'))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_ as t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
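# Illustrative call (not from the original file): fetch up to 1000 rows across the whole
# range, reading only column 'c1' from each row:
#   get_range_slice(client, ColumnParent('Standard1'),
#                   SlicePredicate(column_names=[utf8encode('c1')]),
#                   utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE)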
def _insert_six_columns(key='abc'):
if isinstance(key, str):
key = utf8encode(key)
CL = ConsistencyLevel.ONE
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('a'), utf8encode('1'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('b'), utf8encode('2'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c'), utf8encode('3'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('d'), utf8encode('4'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('e'), utf8encode('5'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('f'), utf8encode('6'), 0), CL)
def _big_multi_slice(key='abc'):
if isinstance(key, str):
key = utf8encode(key)
c1 = ColumnSlice()
c1.start = utf8encode('a')
c1.finish = utf8encode('c')
c2 = ColumnSlice()
c2.start = utf8encode('e')
c2.finish = utf8encode('f')
m = MultiSliceRequest()
m.key = key
m.column_parent = ColumnParent('Standard1')
m.column_slices = [c1, c2]
m.reversed = False
m.count = 10
m.consistency_level = ConsistencyLevel.ONE
return client.get_multi_slice(m)
_MULTI_SLICE_COLUMNS = [Column(utf8encode('a'), utf8encode('1'), 0), Column(utf8encode('b'), utf8encode('2'), 0), Column(utf8encode('c'), utf8encode('3'), 0), Column(utf8encode('e'), utf8encode('5'), 0), Column(utf8encode('f'), utf8encode('6'), 0)]
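# For reference: after _insert_six_columns(), _big_multi_slice() requests the slices a..c
# and e..f, so the expected result is every column except 'd', i.e. the list above.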
@since('2.0', max_version='4')
class TestMutations(TestThrift):
def truncate_all(self, *table_names):
for table in table_names:
client.truncate(table)
def test_insert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard2', 'Super1')
assert _big_slice(utf8encode('key1'), ColumnParent('Standard2')) == []
assert _big_slice(utf8encode('key1'), ColumnParent('Super1')) == []
def test_cas(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard3', 'Standard4')
def cas(expected, updates, column_family):
return client.cas(utf8encode('key1'), column_family, expected, updates, ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
def test_cas_operations(first_columns, second_columns, column_family):
# partition should be empty, so cas expecting any existing values should fail
cas_result = cas(first_columns, first_columns, column_family)
assert not cas_result.success
assert len(cas_result.current_values) == 0, cas_result
# cas of empty columns -> first_columns should succeed
# and the reading back from the table should match first_columns
assert cas([], first_columns, column_family).success
result = [cosc.column for cosc in _big_slice(utf8encode('key1'), ColumnParent(column_family))]
# CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
assert dict((c.name, c.value) for c in result) == dict((ex.name, ex.value) for ex in first_columns)
# now that the partition has been updated, repeating the
# operation which expects it to be empty should not succeed
cas_result = cas([], first_columns, column_family)
assert not cas_result.success
# When we CAS for non-existence, current_values is the first live column of the row
assert dict((c.name, c.value) for c in cas_result.current_values) == {first_columns[0].name: first_columns[0].value}, cas_result
# CL.SERIAL for reads
assert client.get(utf8encode('key1'), ColumnPath(column_family, column=first_columns[0].name), ConsistencyLevel.SERIAL).column.value == first_columns[0].value
# cas first_columns -> second_columns should succeed
assert cas(first_columns, second_columns, column_family).success
# as before, an operation with an incorrect expectation should fail
cas_result = cas(first_columns, second_columns, column_family)
assert not cas_result.success
updated_columns = [Column(utf8encode('c1'), utf8encode('value101'), 1),
Column(utf8encode('c2'), utf8encode('value102'), 1)]
logger.debug("Testing CAS operations on dynamic cf")
test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard1')
logger.debug("Testing CAS operations on static cf")
test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard3')
logger.debug("Testing CAS on mixed static/dynamic cf")
test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard4')
def test_missing_super(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc1'), _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc1'), _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2', 'Super1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
assert client.get_count(utf8encode('key1'), ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), p, ConsistencyLevel.ONE) == 2
assert client.get_count(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c3'), utf8encode('value3'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c4'), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c5'), utf8encode('value5'), 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode('c2'), utf8encode('c4'), False, 1000))
assert client.get_count(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column(utf8encode('c%d' % (i,)), utf8encode('value%d' % (i,)), 0) for i in range(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({utf8encode('key1') : cfmap}, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 2000))
assert client.get_count(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 10))
assert client.get_count(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
    # test that get_count() works correctly with 'count' settings around the page size (CASSANDRA-4833)
def test_count_around_page_size(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
def slice_predicate(count):
return SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, count))
key = utf8encode('key1')
parent = ColumnParent('Standard1')
cl = ConsistencyLevel.ONE
for i in range(0, 3050):
client.insert(key, parent, Column(utf8encode(str(i)), utf8encode(''), 0), cl)
# same as page size
assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
# 1 above page size
assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
        # above the number of columns
assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
# same as number of columns
assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
# 1 above number of columns
assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
def test_super_insert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super()
result = client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2')), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1))
column_parent = ColumnParent('Super1', utf8encode('sc2'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), utf8encode('value5'), 0)], slice
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), True, 1))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), utf8encode('value6'), 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardLong1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert(utf8encode('key1'), ColumnParent('StandardLong1'), Column(name, utf8encode('v'), 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice(utf8encode('key1'), ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardInteger1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert(utf8encode('key1'), ColumnParent('StandardInteger1'), Column(name, utf8encode('v'), 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice(utf8encode('key1'), ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super4')
import uuid
L = []
# 100 isn't enough to fail reliably if the comparator is borked
for i in range(500):
L.append(uuid.uuid1())
client.insert(utf8encode('key1'), ColumnParent('Super4', utf8encode('sc1')), Column(L[-1].bytes, utf8encode('value%s' % i), i), ConsistencyLevel.ONE)
slice = _big_slice(utf8encode('key1'), ColumnParent('Super4', utf8encode('sc1')))
assert len(slice) == 500, len(slice)
for i in range(500):
u = slice[i].column
assert u.value == utf8encode('value%s' % i)
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), True, 1))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, utf8encode('value499'), 499)], slice
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, utf8encode('value0'), 0),
Column(L[1].bytes, utf8encode('value1'), 1),
Column(L[2].bytes, utf8encode('value2'), 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, utf8encode(''), True, 1000))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, utf8encode('value2'), 2),
Column(L[1].bytes, utf8encode('value1'), 1),
Column(L[0].bytes, utf8encode('value0'), 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, utf8encode(''), False, 1))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, utf8encode('value2'), 2)], slice
def test_long_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardLong1')
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1))
for i in range(10):
parent = ColumnParent('StandardLong1')
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value1'), 10 * i), ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value2'), 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), utf8encode('value2'), 10 * i + 2)], (slice, i)
def test_integer_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardInteger1')
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1))
for i in range(10):
parent = ColumnParent('StandardInteger1')
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value1'), 10 * i), ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value2'), 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), utf8encode('value2'), 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
_insert_batch()
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
column_families = ['Standard1', 'Standard2']
keys = [utf8encode('key_%d' % i) for i in range(27, 32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, utf8encode('c1'), utf8encode('value1'))
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
column_families = ['Standard1', 'Standard2']
keys = [utf8encode('key_%d' % i) for i in range(11, 21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
column_families = ['Standard1', 'Standard2']
keys = [utf8encode('key_%d' % i) for i in range(11, 21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1', 'Super2')
column_families = ['Super1', 'Super2']
keys = [utf8encode('key_%d' % i) for i in range(11, 21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
keys = [utf8encode('key_%d' % i) for i in range(17, 21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
keys = [utf8encode('key_%d' % i) for i in range(17, 21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
# known failure: see CASSANDRA-10046
def test_batch_mutate_remove_slice_standard(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
columns = [Column(utf8encode('c1'), utf8encode('value1'), 0),
Column(utf8encode('c2'), utf8encode('value2'), 0),
Column(utf8encode('c3'), utf8encode('value3'), 0),
Column(utf8encode('c4'), utf8encode('value4'), 0),
Column(utf8encode('c5'), utf8encode('value5'), 0)]
for column in columns:
client.insert(utf8encode('key'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start=utf8encode('c2'), finish=utf8encode('c4'))))
client.batch_mutate({utf8encode('key'): {'Standard1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c1')))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c2')))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c3')))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c4')))
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c5')))
# known failure: see CASSANDRA-10046
def test_batch_mutate_remove_slice_of_entire_supercolumns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
columns = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(1), utf8encode('value1'), 0)]),
SuperColumn(name=utf8encode('sc2'),
columns=[Column(_i64(2), utf8encode('value2') , 0), Column(_i64(3), utf8encode('value3') , 0)]),
SuperColumn(name=utf8encode('sc3'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc4'),
columns=[Column(_i64(5), utf8encode('value5') , 0), Column(_i64(6), utf8encode('value6') , 0)]),
SuperColumn(name=utf8encode('sc5'), columns=[Column(_i64(7), utf8encode('value7'), 0)])]
for column in columns:
for subcolumn in column.columns:
client.insert(utf8encode('key'), ColumnParent('Super1', column.name), subcolumn, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start=utf8encode('sc2') , finish=utf8encode('sc4') )))
client.batch_mutate({utf8encode('key'): {'Super1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(1)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc2'), column=_i64(2)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc2'), column=_i64(3)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc3'), column=_i64(4)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc4'), column=_i64(5)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc4'), column=_i64(6)))
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc5'), column=_i64(7)))
@since('1.0', '2.2')
@pytest.mark.skip(reason="Runs but fails and looks like it actually should fail since 8099?")
def test_batch_mutate_remove_slice_part_of_supercolumns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
columns = [Column(_i64(1), utf8encode('value1'), 0),
Column(_i64(2), utf8encode('value2'), 0),
Column(_i64(3), utf8encode('value3'), 0),
Column(_i64(4), utf8encode('value4'), 0),
Column(_i64(5), utf8encode('value5'), 0)]
for column in columns:
client.insert(utf8encode('key'), ColumnParent('Super1', utf8encode('sc1')), column, ConsistencyLevel.ONE)
r = SliceRange(start=_i64(2), finish=_i64(4))
d = Deletion(1, super_column=utf8encode('sc1') , predicate=SlicePredicate(slice_range=r))
client.batch_mutate({utf8encode('key'): {'Super1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(1)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(2)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(3)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(4)))
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(5)))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1', 'Super2')
first_insert = SuperColumn(utf8encode("sc1"),
columns=[Column(_i64(20), utf8encode('value20'), 3),
Column(_i64(21), utf8encode('value21'), 3)])
second_insert = SuperColumn(utf8encode("sc1"),
columns=[Column(_i64(20), utf8encode('value20'), 3),
Column(_i64(21), utf8encode('value21'), 3)])
first_deletion = {'super_column': utf8encode("sc1"),
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': utf8encode("sc2"),
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = [utf8encode('key_30'), utf8encode('key_31')]
for key in keys:
sc = SuperColumn(utf8encode('sc1'), [Column(_i64(22), utf8encode('value22'), 0),
Column(_i64(23), utf8encode('value23'), 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn(utf8encode('sc2'), [Column(_i64(22), utf8encode('value22'), 0),
Column(_i64(23), utf8encode('value23'), 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1': [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2': [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=utf8encode('sc1'), column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column=utf8encode('sc2'), column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=utf8encode('sc1'), column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column=utf8encode('sc1'), column=c))
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef(utf8encode('foo'), 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef(utf8encode('bar'), 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column(utf8encode("foo"), utf8encode('bar'), 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=[utf8encode('baz')]))
client.batch_mutate({utf8encode('key_34'): {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column(utf8encode("foo"), utf8encode('bar'), 0))
client.batch_mutate({utf8encode('key_36'): {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf_2():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=[utf8encode('baz')]))
client.batch_mutate({utf8encode('key_37'): {'Undefined': [Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf_2, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column(utf8encode('birthdate'), utf8encode('bar'), 0))
client.batch_mutate({utf8encode('key_38'): {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode(''), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 1), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 127), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 128), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 129), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 255), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 256), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 257), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * (2 ** 16 - 1)), utf8encode('value'), 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * (2 ** 16)), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1', utf8encode('x')), Column(utf8encode('y'), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Super1'), Column(utf8encode('y'), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove(utf8encode('key1'), ColumnPath('Super1', column=utf8encode('x')), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove(utf8encode('key1'), ColumnPath('Standard1', utf8encode('y'), utf8encode('x')), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get(utf8encode('x' * 2 ** 16), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get(utf8encode(''), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({utf8encode(''): cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('')), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', utf8encode('x'), utf8encode('y')), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=[utf8encode(''), utf8encode('')]), utf8encode(''), utf8encode(''), 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1')), Column(utf8encode('x'), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange(utf8encode('x'), utf8encode(''), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange(utf8encode('x'), utf8encode(''), False, 1))
column_parent = ColumnParent('Super1', utf8encode('sc1'))
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', utf8encode('sc1'))
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('')]), utf8encode('z'), utf8encode('a'), 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be greater than or equal to zero
column = Column(utf8encode('cttl1'), utf8encode('value1'), 0, -1)
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for a standard ColumnFamily
deletion = Deletion(1, utf8encode('supercolumn'), None)
mutation = Mutation(deletion=deletion)
mutations = {utf8encode('key'): {'Standard1': [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, utf8encode('x'), None)
mutation = Mutation(deletion=deletion)
mutations = {utf8encode('key'): {'Super3': [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add(utf8encode('key1'), ColumnParent('Counter1', utf8encode('x')), CounterColumn(utf8encode('y'), 1), ConsistencyLevel.ANY), InvalidRequestException)
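# Insert the canonical _SUPER_COLUMNS into both Super1 and Super2 with a single batch_mutate call,
# then verify each column family independently.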
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1', 'Super2')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({utf8encode('key1'): cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE))
assert client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c2')), ConsistencyLevel.ONE).column \
== Column(utf8encode('c2'), utf8encode('value2'), 0)
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column(utf8encode('c2'), utf8encode('value2'), 0))]
# New insert, make sure it shows up post-remove:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c3'), utf8encode('value3'), 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert columns == [Column(utf8encode('c2'), utf8encode('value2'), 0), Column(utf8encode('c3'), utf8encode('value3'), 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert columns == [Column(utf8encode('c2'), utf8encode('value2'), 0), Column(utf8encode('c3'), utf8encode('value3'), 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert columns == [Column(utf8encode('c1'), utf8encode('value1'), 2), Column(utf8encode('c2'), utf8encode('value2'), 0), Column(utf8encode('c3'), utf8encode('value3'), 0)], columns
def test_cf_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove(utf8encode('key1'), ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), ConsistencyLevel.ONE)
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 4), ConsistencyLevel.ONE)
result = _big_slice(utf8encode('key1'), ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column(utf8encode('c1'), utf8encode('value1'), 4))], result
# check removing the entire super cf, too.
client.remove(utf8encode('key1'), ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice(utf8encode('key1'), ColumnParent('Super1')) == []
assert _big_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1'))) == []
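# A super column removed with a newer timestamp still leaves its row behind: the range slice returns
# the key with an empty list of super columns.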
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
client.insert(utf8encode('key3'), ColumnParent('Super1', utf8encode('sc1')), Column(_i64(1), utf8encode('v1'), 0), ConsistencyLevel.ONE)
client.remove(utf8encode('key3'), ColumnPath('Super1', utf8encode('sc1')), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000)), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {utf8encode('key3'): []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2'), _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2'), _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(6), utf8encode('value6'), 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(7), utf8encode('value7'), 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name=utf8encode('sc1'),
columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'),
columns=[Column(_i64(6), utf8encode('value6'), 0), Column(_i64(7), utf8encode('value7'), 0)])]
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(5), utf8encode('value5'), 6),
Column(_i64(6), utf8encode('value6'), 0),
Column(_i64(7), utf8encode('value7'), 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column=utf8encode('sc2'))
e = _expect_exception(lambda: client.remove(utf8encode('key1'), cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2')), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2'), _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)])]
super_columns = [result.super_column
for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(5), utf8encode('value5'), 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
columns = [result.column
for result in client.get_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), utf8encode('value5'), 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
key = utf8encode('vijay')
client.insert(key, ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', utf8encode('sc1')), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', utf8encode('sc1')), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE) == []
@since('2.1', max_version='4')
def test_super_cql_read_compatibility(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super(utf8encode("key1"))
_insert_super(utf8encode("key2"))
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
session.execute('USE "Keyspace1"')
assert_all(session, "SELECT * FROM \"Super1\"",
[[utf8encode("key1"), utf8encode("sc1"), 4, utf8encode("value4")],
[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")],
[utf8encode("key2"), utf8encode("sc1"), 4, utf8encode("value4")],
[utf8encode("key2"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key2"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1')",
[[utf8encode("key1"), utf8encode("sc1"), 4, utf8encode("value4")],
[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2')",
[[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2') AND column2 = 5",
[[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
[[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT column2, value FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
[[5, utf8encode("value5")],
[6, utf8encode("value6")]])
@since('2.1', max_version='4')
def test_super_cql_write_compatibility(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
session.execute('USE "Keyspace1"')
query = "INSERT INTO \"Super1\" (key, column1, column2, value) VALUES (textAsBlob(%s), textAsBlob(%s), %s, textAsBlob(%s)) USING TIMESTAMP 1234"
session.execute(query, ("key1", "sc1", 4, "value4"))
session.execute(query, ("key1", "sc2", 5, "value5"))
session.execute(query, ("key1", "sc2", 6, "value6"))
session.execute(query, ("key2", "sc1", 4, "value4"))
session.execute(query, ("key2", "sc2", 5, "value5"))
session.execute(query, ("key2", "sc2", 6, "value6"))
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc1'), utf8encode('sc2'), False, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert_length_equal(result, 2)
assert result[0].super_column.name == utf8encode('sc1')
assert result[0].super_column.columns[0] == Column(_i64(4), utf8encode('value4'), 1234)
assert result[1].super_column.name == utf8encode('sc2')
assert result[1].super_column.columns == [Column(_i64(5), utf8encode('value5'), 1234), Column(_i64(6), utf8encode('value6'), 1234)]
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode('key1'), utf8encode(''), 1000, ConsistencyLevel.ONE)[0].key == utf8encode('key1')
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), 1, ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c2')), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c2')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key=utf8encode('key1'))], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode('key1'), utf8encode(''), 1000, ConsistencyLevel.ONE)[0].key == utf8encode('key1')
client.remove(utf8encode('key1'), ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key=utf8encode('key1'))], actual
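# With the ByteOrderedPartitioner used by these tests (see test_describe_partitioner), range slices
# come back in raw byte order of the keys, hence the collation expected below.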
def test_range_collation(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in range(100)]:
key = utf8encode(key)
client.insert(key, ColumnParent('Standard1'), Column(key, utf8encode('v'), 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE)
L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
assert len(slices) == len(L)
for key, ks in zip(L, slices):
key = utf8encode(key)
assert key == ks.key
def test_range_partial(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in range(100)]:
key = utf8encode(key)
client.insert(key, ColumnParent('Standard1'), Column(key, utf8encode('v'), 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
key = utf8encode(key)
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode('a'), utf8encode(''), 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', 'b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode(''), utf8encode('15'), 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode('50'), utf8encode('51'), 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode('1'), utf8encode(''), 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super_range()
_verify_super_range()
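# A KeyRange whose start_token equals its end_token wraps the whole ring, so all five keys are
# expected back.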
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super3')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
key = utf8encode(key)
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Super3', utf8encode('sc1')), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', utf8encode('sc1'))
predicate = SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')])
key_range = KeyRange(start_token='55', end_token='55', count=100)
result = client.get_range_slices(cp, predicate, key_range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == utf8encode('col1')
assert result[0].columns[1].column.name == utf8encode('col3')
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super3')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
key = utf8encode(key)
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Super3', utf8encode('sc1')), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', utf8encode('sc1'))
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key2'), utf8encode('key4'), 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == utf8encode('col1')
assert result[0].columns[1].column.name == utf8encode('col3')
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('sc1')]), utf8encode('key2'), utf8encode('key4'), 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == utf8encode('sc1')
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
key = utf8encode(key)
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Standard1'), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key6'), utf8encode(''), 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('a')]), utf8encode('key2'), utf8encode(''), 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key2'), utf8encode('key4'), 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == utf8encode('col1')
assert result[0].columns[1].column.name == utf8encode('col3')
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key2'), utf8encode('key4'), 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col2'), finish=utf8encode('col4'), reversed=False, count=5)), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == utf8encode('key1')
assert result[1].key == utf8encode('key2')
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == utf8encode('col2')
assert result[0].columns[2].column.name == utf8encode('col4')
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col2'), finish=utf8encode('col4'), reversed=False, count=2)), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col4'), finish=utf8encode('col2'), reversed=True, count=5)), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == utf8encode('col4')
assert result[0].columns[2].column.name == utf8encode('col2')
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col2'), finish=utf8encode('col4'), reversed=False, count=5)), utf8encode('key1'), utf8encode('key2'), 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('col1')), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''))), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == utf8encode('col2'), result[0].columns[0].column.name
assert result[1].columns[0].column.name == utf8encode('col1')
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
keylist = [utf8encode(key) for key in ['a', 'b', 'c', 'd', 'e']]
for key in keylist:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Standard1'), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
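# Both queries below use equal start and end tokens, i.e. a wrapped range covering the whole ring,
# so every key should be returned whichever token the range starts from.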
result = client.get_range_slices(cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == keylist, [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == keylist, [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_range()
p = SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c2')])
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == utf8encode('c1')
assert result[1].column.name == utf8encode('c2')
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1')), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice_with_compact_table(self):
"""Insert multiple keys in a compact table and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# create
cd = ColumnDef(utf8encode('v'), 'AsciiType', None, None)
newcf = CfDef('Keyspace1', 'CompactColumnFamily', default_validation_class='AsciiType', column_metadata=[cd])
client.system_add_column_family(newcf)
CL = ConsistencyLevel.ONE
for i in range(0, 5):
client.insert(utf8encode('key' + str(i)), ColumnParent('CompactColumnFamily'), Column(utf8encode('v'), utf8encode('value' + str(i)), 0), CL)
time.sleep(0.1)
p = SlicePredicate(column_names=[utf8encode('v')])
rows = client.multiget_slice([utf8encode('key' + str(i)) for i in range(0, 5)], ColumnParent('CompactColumnFamily'), p, ConsistencyLevel.ONE)
for i in range(0, 5):
key = utf8encode('key' + str(i))
assert key in rows
assert len(rows[key]) == 1
assert rows[key][0].column.name == utf8encode('v')
assert rows[key][0].column.value == utf8encode('value' + str(i))
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = [utf8encode('key' + str(i)) for i in range(1, num_keys + 1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
for key in keys:
assert key in rows
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
# Generate a list of 10 keys containing 1 to 10 columns and insert them
num_keys = 10
for i in range(1, num_keys + 1):
key = utf8encode('key' + str(i))
for j in range(1, i + 1):
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c' + str(j)), utf8encode('value' + str(j)), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = [utf8encode('key' + str(i)) for i in range(1, num_keys + 1)]
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys + 1):
key = utf8encode('key' + str(i))
assert counts[key] == i
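# batch_mutate can also carry a Deletion naming a super column; the named super column should be
# unreadable afterwards.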
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super('test')
d = Deletion(1, predicate=SlicePredicate(column_names=[utf8encode('sc1')]))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({utf8encode('test'): cfmap}, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc1')), ConsistencyLevel.ONE))
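# A row-level remove at timestamp 2 shadows the timestamp-1 columns; columns re-inserted at
# timestamp 3 must be visible again, and slice counts of 1-3 must all be honoured.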
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
for x in range(3):
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(x), utf8encode('value'), 1), ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in range(3):
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(x + 3), utf8encode('value'), 3), ConsistencyLevel.ONE)
for n in range(1, 4):
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, n))
result_slice = client.get_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), p, ConsistencyLevel.ONE)
assert len(result_slice) == n, "expected %s results; found %s" % (n, result_slice)
def test_describe_keyspace(self):
try:
client.system_drop_keyspace("ValidKsForUpdate")
except InvalidRequestException:
pass # The keyspace doesn't exist, because this test was run in isolation.
kspaces = client.describe_keyspaces()
if self.cluster.version() >= '3.0':
assert len(kspaces) == 7, [x.name for x in kspaces] # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed', 'system_schema']
elif self.cluster.version() >= '2.2':
assert len(kspaces) == 6, [x.name for x in kspaces] # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed']
else:
assert len(kspaces) == 4, [x.name for x in kspaces] # ['Keyspace2', 'Keyspace1', 'system', 'system_traces']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
assert client.describe_cluster_name() == 'test'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_token_map(self):
# test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
# which uses BytesToken, so this just tests that the string representation of the token
# matches a regex pattern for BytesToken.toString().
ring = list(client.describe_token_map().items())
if not self.dtest_config.use_vnodes:
assert len(ring) == 1
else:
assert len(ring) == int(self.dtest_config.num_tokens)
token, node = ring[0]
if self.dtest_config.use_vnodes:
assert re.match("[0-9A-Fa-f]{32}", token)
assert node == '127.0.0.1'
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column families can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert(utf8encode('key0'), ColumnParent(cf_name), Column(utf8encode('colA'), utf8encode('colA-value'), 0), ConsistencyLevel.ONE)
col1 = client.get_slice(utf8encode('key0'), ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == utf8encode('colA') and col1.value == utf8encode('colA-value')
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice(utf8encode('key0'), ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
assert len(cosc_list) == 0, 'cosc length test failed'
client.system_drop_keyspace(keyspace)
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# check strategy options are validated on modify
def modify_invalid_ks():
client.system_update_keyspace(KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{},
cf_defs=[]))
_expect_exception(modify_invalid_ks, InvalidRequestException)
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
# check strategy options are validated on creation
def create_invalid_ks():
client.system_add_keyspace(KsDef('InvalidKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{},
cf_defs=[]))
_expect_exception(create_invalid_ks, InvalidRequestException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor': '1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
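# Values are checked against the per-column validator where one is declared (column 'col' is
# LongType here), and against the column family's default_validation_class otherwise.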
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef(utf8encode('col'), 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
col0 = Column(utf8encode('col'), _i64(42), 0)
col1 = Column(utf8encode('col'), utf8encode("ceci n'est pas 64bit"), 0)
client.insert(utf8encode('key0'), cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert(utf8encode('key1'), cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily', utf8encode('sc1'))
client.insert(utf8encode('key0'), scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert(utf8encode('key1'), scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert(utf8encode('key0'), dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert(utf8encode('key1'), dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# inserting a longtype into column 'fcol' should fail at the cfdef level
col2 = Column(utf8encode('fcol'), _i64(4224), 0)
e = _expect_exception(lambda: client.insert(utf8encode('key1'), dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# inserting a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column(utf8encode('fcol'), utf8encode("Stringin' it up in the Stringtel Stringifornia"), 0)
client.insert(utf8encode('key0'), dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef(utf8encode('ValidationColumn'), 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name == 'NewColumnFamily'][0]
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name == 'NewColumnFamily'][0]
assert server_cf
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name == 'NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name == 'NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
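# Secondary (KEYS) indexes can be added to and dropped from existing column families via
# system_update_column_family.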
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name == 'BlankCF'][0]
modified_cd = ColumnDef(utf8encode('birthdate'), 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef(utf8encode('birthdate'), 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef(utf8encode('age'), 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name == 'BlankCF2'][0]
name_coldef = ColumnDef(utf8encode('name'), 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name == 'BlankCF2'][0]
birthdate_coldef = ColumnDef(utf8encode('birthdate'), 'BytesType', None, None)
age_coldef = ColumnDef(utf8encode('age'), 'BytesType', None, None)
name_coldef = ColumnDef(utf8encode('name'), 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name == 'BlankCF'][0]
birthdate_coldef = ColumnDef(utf8encode('birthdate'), 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef(utf8encode('birthdate'), 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert(utf8encode('key1'), ColumnParent('ToBeIndexed'), Column(utf8encode('birthdate'), _i64(1), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('ToBeIndexed'), Column(utf8encode('birthdate'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('ToBeIndexed'), Column(utf8encode('b'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('ToBeIndexed'), Column(utf8encode('birthdate'), _i64(3), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('ToBeIndexed'), Column(utf8encode('b'), _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key1')
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name == 'ToBeIndexed'][0]
modified_cd = ColumnDef(utf8encode('birthdate'), 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name == 'ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(5)
# repeat query on one index expression
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key1')
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef(utf8encode('ValidationColumn'), 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
def test_insert_ttl(self):
self._base_insert_ttl()
def test_insert_max_ttl(self):
self._base_insert_ttl(ttl=MAX_TTL, max_default_ttl=False)
def test_insert_max_default_ttl(self):
self._base_insert_ttl(ttl=None, max_default_ttl=True)
def _base_insert_ttl(self, ttl=5, max_default_ttl=False):
""" Test simple insertion of a column with max ttl """
_set_keyspace('Keyspace1')
cf = 'ExpiringMaxTTL' if max_default_ttl else 'Standard1'
logprefix = 'default ' if max_default_ttl else ''
self.truncate_all(cf)
node1 = self.cluster.nodelist()[0]
mark = node1.mark_log()
column = Column(utf8encode('cttl1'), utf8encode('value1'), 0, ttl)
expected = Column(utf8encode('cttl1'), utf8encode('value1'), 0, MAX_TTL) if max_default_ttl else column
client.insert(utf8encode('key1'), ColumnParent(cf), column, ConsistencyLevel.ONE)
assert client.get(utf8encode('key1'), ColumnPath(cf, column=utf8encode('cttl1')), ConsistencyLevel.ONE).column == expected
if ttl and ttl < MAX_TTL:
assert not node1.grep_log("exceeds maximum supported expiration", from_mark=mark), "Should not print max expiration date exceeded warning"
else:
node1.watch_log_for("Request on table {}.{} with {}ttl of {} seconds exceeds maximum supported expiration"
.format('Keyspace1', cf, logprefix, MAX_TTL), timeout=10)
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column = Column(utf8encode('cttl3'), utf8encode('value1'), 0, 2)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
c = client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl3')), ConsistencyLevel.ONE).column
assert c == column
time.sleep(3)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl3')), ConsistencyLevel.ONE))
def test_expiration_with_default_ttl(self):
""" Test that column with default ttl do expires """
_set_keyspace('Keyspace1')
self.truncate_all('Expiring')
column = Column(utf8encode('cttl3'), utf8encode('value1'), 0)
client.insert(utf8encode('key1'), ColumnParent('Expiring'), column, ConsistencyLevel.ONE)
client.get(utf8encode('key1'), ColumnPath('Expiring', column=utf8encode('cttl3')), ConsistencyLevel.ONE).column
time.sleep(3)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Expiring', column=utf8encode('cttl3')), ConsistencyLevel.ONE))
@since('3.6', max_version='4')
def test_expiration_with_default_ttl_and_zero_ttl(self):
"""
Test that we can remove the default ttl by setting the ttl explicitly to zero
CASSANDRA-11207
"""
_set_keyspace('Keyspace1')
self.truncate_all('Expiring')
column = Column(utf8encode('cttl3'), utf8encode('value1'), 0, 0)
client.insert(utf8encode('key1'), ColumnParent('Expiring'), column, ConsistencyLevel.ONE)
c = client.get(utf8encode('key1'), ColumnPath('Expiring', column=utf8encode('cttl3')), ConsistencyLevel.ONE).column
assert Column(utf8encode('cttl3'), utf8encode('value1'), 0) == c
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column = Column(utf8encode('cttl4'), utf8encode('value1'), 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({utf8encode('key1'): cfmap}, ConsistencyLevel.ONE)
c = client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl4')), ConsistencyLevel.ONE).column
assert c == column
time.sleep(3)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl4')), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column1 = Column(utf8encode('cttl4'), utf8encode('value1'), 0, 1)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column(utf8encode('cttl4'), utf8encode('value1'), 1)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl4')), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column = Column(utf8encode('cttl5'), utf8encode('value1'), 0, 10)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl5')), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl5')), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
def test_incr_decr_standard_add(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1 + d2)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1 + d2 + d3)
def test_incr_decr_super_add(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = -234
d2 = 52345
d3 = 3123
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c2'), d2), ConsistencyLevel.ONE)
rv1 = client.get(key, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1')), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d2), ConsistencyLevel.ONE)
rv2 = client.get(key, ColumnPath('SuperCounter1', utf8encode('sc1'), utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1 + d2)
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d3), ConsistencyLevel.ONE)
rv3 = client.get(key, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1 + d2 + d3)
def test_incr_standard_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 124
# insert value and check it exists
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
def test_incr_super_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 52345
# insert value and check it exists
client.add(key1, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
def test_incr_decr_standard_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 124
# insert value and check it exists
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
def test_incr_decr_super_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 52345
# insert value and check it exists
client.add(key1, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
def test_incr_decr_standard_batch_add(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = 12
d2 = -21
update_map = {key: {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
rv1 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1 + d2
def test_incr_decr_standard_batch_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {key1: {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1 + d2
# remove the previous column and check that it is gone
update_map = {key1: {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=[utf8encode('c1')]))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
update_map = {key2: {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1 + d2
update_map = {key2: {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# known failure: see CASSANDRA-10046
def test_range_deletion(self):
""" Tests CASSANDRA-7990 """
_set_keyspace('Keyspace1')
self.truncate_all('StandardComposite')
for i in range(10):
column_name = composite(str(i), str(i))
column = Column(column_name, utf8encode('value'), int(time.time() * 1000))
client.insert(utf8encode('key1'), ColumnParent('StandardComposite'), column, ConsistencyLevel.ONE)
delete_slice = SlicePredicate(slice_range=SliceRange(composite('3', eoc=b'\xff'), composite('6', b'\x01'), False, 100))
mutations = [Mutation(deletion=Deletion(int(time.time() * 1000), predicate=delete_slice))]
keyed_mutations = {utf8encode('key1'): {'StandardComposite': mutations}}
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
slice_predicate = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100))
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'),
composite('6', '6'), composite('7', '7'), composite('8', '8'), composite('9', '9')]
@pytest.mark.skip_version('3.9')
def test_range_deletion_eoc_0(self):
"""
This test confirms that a range tombstone with a final EOC of 0
results in an exclusive deletion except for cells that exactly match the tombstone bound.
@jira_ticket CASSANDRA-12423
"""
_set_keyspace('Keyspace1')
self.truncate_all('StandardComposite')
for i in range(10):
column_name = composite(str(i), str(i))
column = Column(column_name, utf8encode('value'), int(time.time() * 1000))
client.insert(utf8encode('key1'), ColumnParent('StandardComposite'), column, ConsistencyLevel.ONE)
# insert a partial cell name (just the first element of the composite)
column_name = composite('6', None, eoc=b'\x00')
column = Column(column_name, utf8encode('value'), int(time.time() * 1000))
client.insert(utf8encode('key1'), ColumnParent('StandardComposite'), column, ConsistencyLevel.ONE)
# sanity check the query
slice_predicate = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100))
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'), composite('3', '3'), composite('4', '4'), composite('5', '5'),
composite('6'),
composite('6', '6'),
composite('7', '7'), composite('8', '8'), composite('9', '9')]
# do a slice deletion with (6, ) as the end
delete_slice = SlicePredicate(slice_range=SliceRange(composite('3', eoc=b'\xff'), composite('6', b'\x00'), False, 100))
mutations = [Mutation(deletion=Deletion(int(time.time() * 1000), predicate=delete_slice))]
keyed_mutations = {utf8encode('key1'): {'StandardComposite': mutations}}
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
# check the columns post-deletion: (6, ) is gone because it exactly matches the tombstone bound, but (6, 6) is not
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'),
composite('6', '6'),
composite('7', '7'), composite('8', '8'), composite('9', '9')]
# do another slice deletion, but make the end (6, 6) this time
delete_slice = SlicePredicate(slice_range=SliceRange(composite('3', eoc=b'\xff'), composite('6', '6', b'\x00'), False, 100))
mutations = [Mutation(deletion=Deletion(int(time.time() * 1000), predicate=delete_slice))]
keyed_mutations = {utf8encode('key1'): {'StandardComposite': mutations}}
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
# check the columns post-deletion, now (6, 6) is also gone
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'),
composite('7', '7'), composite('8', '8'), composite('9', '9')]
def test_incr_decr_standard_slice(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = 12
d2 = -21
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c2'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d2), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c4'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c5'), d1), ConsistencyLevel.ONE)
# insert positive and negative values and check the counts
counters = client.get_slice(key, ColumnParent('Counter1'), SlicePredicate([utf8encode('c3'), utf8encode('c4')]), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1 + d2
assert counters[1].counter_column.value == d1
def test_incr_decr_standard_multiget_slice(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 12
d2 = -21
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c2'), d1), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d1), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d2), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c4'), d1), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c5'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c2'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d2), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c4'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c5'), d1), ConsistencyLevel.ONE)
# insert positive and negative values and check the counts
counters = client.multiget_slice([key1, key2], ColumnParent('Counter1'), SlicePredicate([utf8encode('c3'), utf8encode('c4')]), ConsistencyLevel.ONE)
assert counters[key1][0].counter_column.value == d1 + d2
assert counters[key1][1].counter_column.value == d1
assert counters[key2][0].counter_column.value == d1 + d2
assert counters[key2][1].counter_column.value == d1
def test_counter_get_slice_range(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
client.add(key, ColumnParent('Counter1'), CounterColumn(utf8encode('c1'), 1), ConsistencyLevel.ONE)
client.add(key, ColumnParent('Counter1'), CounterColumn(utf8encode('c2'), 2), ConsistencyLevel.ONE)
client.add(key, ColumnParent('Counter1'), CounterColumn(utf8encode('c3'), 3), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode('c1'), utf8encode('c2'), False, 1000))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == utf8encode('c1')
assert result[1].counter_column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('c3'), utf8encode('c2'), True, 1000))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == utf8encode('c3')
assert result[1].counter_column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 1000))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 2))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def test_counter_get_slice_super_range(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc1')), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc2')), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc2')), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc3')), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc2'), utf8encode('sc3'), False, 2))
result = client.get_slice(key, ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == utf8encode('sc2')
assert result[1].counter_super_column.name == utf8encode('sc3')
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc3'), utf8encode('sc2'), True, 2))
result = client.get_slice(key, ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == utf8encode('sc3')
assert result[1].counter_super_column.name == utf8encode('sc2')
def test_index_scan(self):
_set_keyspace('Keyspace1')
self.truncate_all('Indexed1')
client.insert(utf8encode('key1'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(1), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('Indexed1'), Column(utf8encode('b'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(3), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('Indexed1'), Column(utf8encode('b'), _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key1')
assert len(result[0].columns) == 1, result[0].columns
# without index
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('b'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
# but an unindexed expression combined with an indexed one is ok
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('b'), IndexOperator.EQ, _i64(3)), IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(3))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key3')
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
self.truncate_all('Indexed3')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert(utf8encode('key1'), ColumnParent('Indexed3'), Column(u, utf8encode('a'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Indexed3'), Column(u2, utf8encode('b'), 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(u, IndexOperator.EQ, utf8encode('a')), IndexExpression(u2, IndexOperator.EQ, utf8encode('b'))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('foo'), IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
# value must be valid (TimeUUID)
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, utf8encode("foo"))], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
self.truncate_all('Indexed1')
client.insert(utf8encode('key1'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(1), 0, 2), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(1))], 100)
# query before expiration
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(3)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
def test_index_scan_indexed_column_outside_slice_predicate(self):
"""
Verify that performing an indexed read works when the indexed column
is not included in the slice predicate. Checks both cases where the
predicate contains a slice range or a set of column names, which
translate to slice and names queries server-side.
@jira_ticket CASSANDRA-11523
"""
_set_keyspace('Keyspace1')
self.truncate_all('Indexed4')
client.insert(utf8encode('key1'), ColumnParent('Indexed4'), Column(utf8encode('a'), _i64(1), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Indexed4'), Column(utf8encode('z'), utf8encode('zzz'), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed4')
sp = SlicePredicate(slice_range=SliceRange(utf8encode('z'), utf8encode('z')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('a'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].column.name == utf8encode('z')
sp = SlicePredicate(column_names=[utf8encode('z')])
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].column.name == utf8encode('z')
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
key = utf8encode('doesntexist')
column_path = ColumnPath(column_family="Standard1", column=utf8encode("idontexist"))
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super3')
key = utf8encode('key1')
# three supercolumns, each with a "col1" subcolumn
for i in range(1, 4):
client.insert(key, ColumnParent('Super3', utf8encode('sc%d' % i)), Column(utf8encode('col1'), utf8encode('val1'), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange(utf8encode('sc1'), utf8encode('sc3'), False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
# remove sc1; add back subcolumn to override tombstone
client.remove(key, ColumnPath('Super3', utf8encode('sc1')), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', utf8encode('sc1')), Column(utf8encode('col1'), utf8encode('val1'), 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == utf8encode('sc1')
def test_multi_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_six_columns('abc')
L = [result.column
for result in _big_multi_slice('abc')]
assert L == _MULTI_SLICE_COLUMNS, L
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
self.truncate_all('Standard1')
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) == []
# truncate Super1
self.truncate_all('Super1')
assert _big_slice(utf8encode('key1'), ColumnParent('Super1')) == []
assert _big_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1'))) == []
@since('3.0', max_version='4')
def test_cql_range_tombstone_and_static(self):
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
# Create a CQL table with a static column and insert a row
session.execute('USE "Keyspace1"')
session.execute("CREATE TABLE t (k text, s text static, t text, v text, PRIMARY KEY (k, t))")
session.execute("INSERT INTO t (k, s, t, v) VALUES ('k', 's', 't', 'v') USING TIMESTAMP 0")
assert_one(session, "SELECT * FROM t", ['k', 't', 's', 'v'])
# Now submit a range deletion that should include both the row and the static value
_set_keyspace('Keyspace1')
mutations = [Mutation(deletion=Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))))]
mutation_map = dict((table, mutations) for table in ['t'])
keyed_mutations = dict((key, mutation_map) for key in [utf8encode('k')])
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
# And check everything is gone
assert_none(session, "SELECT * FROM t")
def test_compact_storage_get(self):
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
# Create a compact storage CQL table
session.execute("USE \"Keyspace1\"")
session.execute("CREATE TABLE IF NOT EXISTS cs1 (k int PRIMARY KEY,v int) WITH COMPACT STORAGE")
_set_keyspace('Keyspace1')
CL = ConsistencyLevel.ONE
i = 1
client.insert(_i32(i), ColumnParent('cs1'), Column(utf8encode('v'), _i32(i), 0), CL)
_assert_column('cs1', _i32(i), utf8encode('v'), _i32(i), 0)
@pytest.mark.skip_version('3.9')
def test_range_tombstone_eoc_0(self):
"""
Insert a range tombstone with EOC=0 for a compact storage table. Insert 2 rows that
are just outside the range and check that they are present.
@jira_ticket CASSANDRA-12423
"""
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
session.execute('USE "Keyspace1"')
session.execute("CREATE TABLE test (id INT, c1 TEXT, c2 TEXT, v INT, PRIMARY KEY (id, c1, c2)) "
"with compact storage and compression = {'sstable_compression': ''};")
_set_keyspace('Keyspace1')
range_delete = {
_i32(1): {
'test': [Mutation(deletion=Deletion(2470761440040513,
predicate=SlicePredicate(slice_range=SliceRange(
start=composite('a'), finish=composite('asd')))))]
}
}
client.batch_mutate(range_delete, ConsistencyLevel.ONE)
session.execute("INSERT INTO test (id, c1, c2, v) VALUES (1, 'asd', '', 0) USING TIMESTAMP 1470761451368658")
session.execute("INSERT INTO test (id, c1, c2, v) VALUES (1, 'asd', 'asd', 0) USING TIMESTAMP 1470761449416613")
ret = list(session.execute('SELECT * FROM test'))
assert 2 == len(ret)
node1.nodetool('flush Keyspace1 test')
| apache-2.0 | -767,238,346,867,501,800 | -5,682,584,372,652,974,000 | 53.429044 | 624 | 0.637703 | false |
raychorn/knowu | django/djangononrelsample2/django/core/management/templates.py | 102 | 12715 | import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
try:
from urllib.request import urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve
from optparse import make_option
from os import path
import django
from django.template import Template, Context
from django.utils import archive
from django.utils._os import rmtree_errorhandler
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.makemessages import handle_extensions
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
args = "[name] [optional destination directory]"
option_list = BaseCommand.option_list + (
make_option('--template',
action='store', dest='template',
help='The dotted import path to load the template from.'),
make_option('--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.'),
make_option('--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. '
'Separate multiple file names with commas, or use '
'-n multiple times.')
)
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = int(options.get('verbosity'))
# If it's not a valid directory name.
if not re.search(r'^[_a-zA-Z]\w*$', name):
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = ('make sure the name begins '
'with a letter or underscore')
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except OSError as e:
if e.errno == errno.EEXIST:
message = "'%s' already exists" % top_dir
else:
message = e
raise CommandError(message)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(
handle_extensions(options.get('extensions'), ignored=()))
extra_files = []
for file in options.get('files'):
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
context = Context(dict(options, **{
base_name: name,
base_directory: top_dir,
}), autoescape=False)
# Setup a stub settings environment for template rendering
from django.conf import settings
if not settings.configured:
settings.configure()
template_dir = self.handle_template(options.get('template'),
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
# Only render the Python files, as we don't want to
# accidentally render Django template files
with open(old_path, 'rb') as template_file:
content = template_file.read()
if filename.endswith(extensions) or filename in extra_files:
content = content.decode('utf-8')
template = Template(content)
content = template.render(context)
content = content.encode('utf-8')
with open(new_path, 'wb') as new_file:
new_file.write(content)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove,
onerror=rmtree_errorhandler)
def handle_template(self, template, subdir):
"""
Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
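# Template sources handle_template() accepts (sketch; the paths and URL below
# are hypothetical examples, not part of the original module):
#
#   self.handle_template(None, 'app_template')                    # bundled django/conf/app_template
#   self.handle_template('file:///tmp/mytemplate', 'app_template')  # local directory
#   self.handle_template('/tmp/mytemplate.zip', 'app_template')     # local archive, extracted
#   self.handle_template('https://example.com/tpl.tar.gz', 'app_template')  # downloaded, then extracted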
def download(self, url):
"""
Downloads the given URL and returns the file name.
"""
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
# Trying to get better name from response headers
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
# chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
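# Expected behaviour, as a sketch (not part of the original module):
#   self.splitext('template.zip')    -> ('template', '.zip')
#   self.splitext('template.tar.gz') -> ('template', '.tar.gz')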
def extract(self, filename):
"""
Extracts the given file to a temporary directory and returns
the path of the directory with the extracted content.
"""
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
"""
Returns True if the name looks like a URL
"""
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| lgpl-3.0 | 3,884,438,668,225,320,000 | -7,458,228,528,810,090,000 | 39.365079 | 79 | 0.546677 | false |
guru-digital/CouchPotatoServer | libs/suds/client.py | 150 | 25971 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
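# Typical usage of this module (illustrative sketch; the WSDL URL, operation
# and type names below are hypothetical):
#
#   from suds.client import Client
#   client = Client('http://example.com/service?wsdl')
#   print client                                  # dumps the service definition(s)
#   result = client.service.GetWeather('Dublin')  # invoke an operation
#   person = client.factory.create('Person')      # build a WSDL-defined type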
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
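# Example (sketch; field names are hypothetical): for a suds object with
# fields 'name' and 'age', Client.items(person) produces the pairs
# ('name', u'Elmer'), ('age', 86), mirroring dict.items() for suds objects.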
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
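# Example (sketch): clone() allows per-request option changes without
# re-reading the WSDL:
#   fast = client.clone()
#   fast.set_options(timeout=5)   # only affects the clone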
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
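# Example (sketch; 'ns0:Person' and 'Color' are hypothetical WSDL types):
#   person = client.factory.create('ns0:Person')
#   person.name = 'Elmer'
#   # enum types expose their values as attributes:
#   red = client.factory.create('Color').RED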
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception, 'No services defined'
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound, 'at [%d]' % name
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound, name
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
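# Illustrative service selection -- a sketch; the service and method names are
# assumptions. With one (or a default) service, attribute access passes
# straight through; otherwise select by name or index first.
#
#   client.service.getRate()                   # first/default service
#   client.service['RateService'].getRate()    # select service by name
#   client.service[1].getRate()                # select service by index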
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception, 'No ports defined: %s' % self.__qn
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound, qn
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound, qn
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
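# Illustrative port selection -- a sketch; names are assumptions. Ports are
# selected like services (by name or index), and the 'port' option pins a
# default so plain attribute access keeps working.
#
#   client.service['RateService']['RatePort'].getRate()
#   client.set_options(port='RatePort')   # set_options assumed; maps to options.port
#   client.service.getRate()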
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound, qn
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
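# Illustrative effect of the faults option -- a sketch; the method name is an
# assumption. With faults enabled (the default) a soap fault raises WebFault;
# with it disabled the wrapper returns an (http-code, value) tuple instead.
#
#   client.set_options(faults=False)            # set_options assumed
#   code, value = client.service.getRate()      # e.g. (200, result) or (500, fault)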
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
@type cookiejar: libcookie.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
        @raise WebFault: On server fault.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
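# Illustrative use of message/reply injection -- a sketch; the canned XML
# strings are assumptions. The reserved '__inject' keyword routes the call
# through SimClient: 'reply' or 'fault' simulate the response locally, while
# 'msg' sends a prepared soap envelope instead of building one.
#
#   client.service.getRate(__inject=dict(reply=canned_reply_xml))
#   client.service.getRate(__inject=dict(fault=canned_fault_xml))
#   client.service.getRate(__inject=dict(msg=prepared_envelope_xml))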
| gpl-3.0 | -7,878,841,673,921,142,000 | -2,345,943,322,204,025,300 | 32.084076 | 84 | 0.567479 | false |
highweb-project/highweb-webcl-html5spec | third_party/pycoverage/coverage/report.py | 214 | 3031 | """Reporter foundation for Coverage."""
import fnmatch, os
from coverage.codeunit import code_unit_factory
from coverage.files import prep_patterns
from coverage.misc import CoverageException, NoSource, NotPython
class Reporter(object):
"""A base class for all reporters."""
def __init__(self, coverage, config):
"""Create a reporter.
`coverage` is the coverage instance. `config` is an instance of
CoverageConfig, for controlling all sorts of behavior.
"""
self.coverage = coverage
self.config = config
# The code units to report on. Set by find_code_units.
self.code_units = []
# The directory into which to place the report, used by some derived
# classes.
self.directory = None
def find_code_units(self, morfs):
"""Find the code units we'll report on.
`morfs` is a list of modules or filenames.
"""
morfs = morfs or self.coverage.data.measured_files()
file_locator = self.coverage.file_locator
self.code_units = code_unit_factory(morfs, file_locator)
if self.config.include:
patterns = prep_patterns(self.config.include)
filtered = []
for cu in self.code_units:
for pattern in patterns:
if fnmatch.fnmatch(cu.filename, pattern):
filtered.append(cu)
break
self.code_units = filtered
if self.config.omit:
patterns = prep_patterns(self.config.omit)
filtered = []
for cu in self.code_units:
for pattern in patterns:
if fnmatch.fnmatch(cu.filename, pattern):
break
else:
filtered.append(cu)
self.code_units = filtered
self.code_units.sort()
def report_files(self, report_fn, morfs, directory=None):
"""Run a reporting function on a number of morfs.
`report_fn` is called for each relative morf in `morfs`. It is called
as::
report_fn(code_unit, analysis)
where `code_unit` is the `CodeUnit` for the morf, and `analysis` is
the `Analysis` for the morf.
"""
self.find_code_units(morfs)
if not self.code_units:
raise CoverageException("No data to report.")
self.directory = directory
if self.directory and not os.path.exists(self.directory):
os.makedirs(self.directory)
for cu in self.code_units:
try:
report_fn(cu, self.coverage._analyze(cu))
except NoSource:
if not self.config.ignore_errors:
raise
except NotPython:
# Only report errors for .py files, and only if we didn't
# explicitly suppress those errors.
if cu.should_be_python() and not self.config.ignore_errors:
raise
| bsd-3-clause | -6,897,650,591,605,988,000 | 2,509,899,022,038,871,600 | 31.945652 | 78 | 0.571429 | false |
google/carfac | python/tf/car_saveload_test.py | 1 | 1728 | # Lint as: python3
#!/usr/bin/env python
# Copyright 2021 The CARFAC Authors. All Rights Reserved.
#
# This file is part of an implementation of Lyon's cochlear model:
# "Cascade of Asymmetric Resonators with Fast-Acting Compression"
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for carfac.tf.pz."""
import tempfile
import unittest
from absl import app
import numpy as np
import tensorflow as tf
from . import car
class CARTest(unittest.TestCase):
def testSaveLoad(self):
car_cell = car.CARCell()
car_layer = tf.keras.layers.RNN(car_cell, return_sequences=True)
model = tf.keras.Sequential()
model.add(car_layer)
impulse: np.ndarray = np.zeros([3, 10, 1], dtype=np.float32)
impulse[:, 0, :] = 1
impulse: tf.Tensor = tf.constant(impulse)
model.build(impulse.shape)
with tempfile.TemporaryDirectory() as savefile:
model.save(savefile)
loaded_model: tf.keras.models.Model = tf.keras.models.load_model(
savefile, custom_objects={'CARCell': car.CARCell})
np.testing.assert_array_almost_equal(model(impulse),
loaded_model(impulse))
def main(_):
unittest.main()
if __name__ == '__main__':
app.run(main)
| apache-2.0 | -4,572,994,097,901,489,700 | -2,117,428,934,104,454,000 | 30.418182 | 74 | 0.699653 | false |
thaim/ansible | lib/ansible/modules/network/f5/bigip_gtm_datacenter.py | 38 | 14466 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
manipulate the data center definitions in a BIG-IP.
version_added: 2.2
options:
contact:
description:
- The name of the contact for the data center.
type: str
description:
description:
- The description of the data center.
type: str
location:
description:
- The location of the data center.
type: str
name:
description:
- The name of the data center.
type: str
required: True
state:
description:
      - The datacenter state. If C(absent), an attempt to delete the
        datacenter will be made. This will only succeed if the
        datacenter is not currently in use. C(present) creates
        the datacenter and enables it. If C(enabled), enable the
        datacenter if it exists. If C(disabled), create the datacenter if
        needed, and set state to C(disabled).
type: str
choices:
- present
- absent
- enabled
- disabled
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create data center "New York"
bigip_gtm_datacenter:
name: New York
location: 222 West 23rd
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
contact:
description: The contact that was set on the datacenter.
returned: changed
type: str
sample: [email protected]
description:
description: The description that was set for the datacenter.
returned: changed
type: str
sample: Datacenter in NYC
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
disabled:
description: Whether the datacenter is disabled or not.
returned: changed
type: bool
sample: true
state:
description: State of the datacenter.
returned: changed
type: str
sample: disabled
location:
description: The location that is set for the datacenter.
returned: changed
type: str
sample: 222 West 23rd
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
api_map = {}
updatables = [
'location', 'description', 'contact', 'state',
]
returnables = [
'location', 'description', 'contact', 'state', 'enabled', 'disabled',
]
api_attributes = [
'enabled', 'location', 'description', 'contact', 'disabled',
]
class ApiParameters(Parameters):
@property
def disabled(self):
if self._values['disabled'] is True:
return True
return None
@property
def enabled(self):
if self._values['enabled'] is True:
return True
return None
class ModuleParameters(Parameters):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
return None
@property
def state(self):
if self.enabled and self._values['state'] != 'present':
return 'enabled'
elif self.disabled and self._values['state'] != 'present':
return 'disabled'
else:
return self._values['state']
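# Rough mapping (a sketch, not part of the original module) from the requested
# ``state`` to the flags this parameter class sends to the BIG-IP API:
#
#   state=present  -> enabled=True, disabled=None
#   state=enabled  -> enabled=True, disabled=None
#   state=disabled -> enabled=None, disabled=True
#   state=absent   -> handled separately; the datacenter is removed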
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
class ReportableChanges(Changes):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
elif self._values['state'] in ['enabled', 'present']:
return False
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
elif self._values['state'] == 'disabled':
return False
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def state(self):
if self.want.enabled != self.have.enabled:
return dict(
state=self.want.state,
enabled=self.want.enabled
)
if self.want.disabled != self.have.disabled:
return dict(
state=self.want.state,
disabled=self.want.disabled
)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def create(self):
self.have = ApiParameters()
self.should_update()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the datacenter")
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the datacenter")
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
contact=dict(),
description=dict(),
location=dict(),
name=dict(required=True),
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| mit | -2,936,990,654,726,126,000 | -8,630,701,853,614,713,000 | 28.522449 | 91 | 0.594567 | false |
secynic/ipwhois | ipwhois/tests/online/test_rdap.py | 1 | 1923 | import json
import io
from os import path
import logging
from ipwhois.tests import TestCommon
from ipwhois.exceptions import (HTTPLookupError, HTTPRateLimitError)
from ipwhois.rdap import (RDAP, Net)
LOG_FORMAT = ('[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)s] '
'[%(funcName)s()] %(message)s')
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
log = logging.getLogger(__name__)
class TestRDAP(TestCommon):
def test_lookup(self):
data_dir = path.abspath(path.join(path.dirname(__file__), '..'))
with io.open(str(data_dir) + '/rdap.json', 'r') as data_file:
data = json.load(data_file)
for key, val in data.items():
log.debug('Testing: {0}'.format(key))
net = Net(key)
obj = RDAP(net)
try:
self.assertIsInstance(obj.lookup(asn_data=val['asn_data'],
depth=1), dict)
except (HTTPLookupError, HTTPRateLimitError):
pass
except AssertionError as e:
raise e
except Exception as e:
self.fail('Unexpected exception raised: {0}'.format(e))
for key, val in data.items():
log.debug('Testing bootstrap and raw: {0}'.format(key))
net = Net(key)
obj = RDAP(net)
try:
self.assertIsInstance(obj.lookup(asn_data=val['asn_data'],
depth=3,
bootstrap=True,
inc_raw=True), dict)
except (HTTPLookupError, HTTPRateLimitError):
pass
except AssertionError as e:
raise e
except Exception as e:
self.fail('Unexpected exception raised: {0}'.format(e))
| bsd-2-clause | 8,979,366,100,480,578,000 | 3,171,093,568,636,113,000 | 26.471429 | 74 | 0.512741 | false |
gdimitris/ChessPuzzler | Virtual_Environment/lib/python2.7/site-packages/migrate/tests/changeset/test_changeset.py | 66 | 36587 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlalchemy
import warnings
from sqlalchemy import *
from migrate import changeset, exceptions
from migrate.changeset import *
from migrate.changeset import constraint
from migrate.changeset.schema import ColumnDelta
from migrate.tests import fixture
from migrate.tests.fixture.warnings import catch_warnings
import six
class TestAddDropColumn(fixture.DB):
"""Test add/drop column through all possible interfaces
also test for constraints
"""
level = fixture.DB.CONNECT
table_name = 'tmp_adddropcol'
table_name_idx = 'tmp_adddropcol_idx'
table_int = 0
def _setup(self, url):
super(TestAddDropColumn, self)._setup(url)
self.meta = MetaData()
self.table = Table(self.table_name, self.meta,
Column('id', Integer, unique=True),
)
self.table_idx = Table(
self.table_name_idx,
self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('b', Integer),
Index('test_idx', 'a', 'b')
)
self.meta.bind = self.engine
if self.engine.has_table(self.table.name):
self.table.drop()
if self.engine.has_table(self.table_idx.name):
self.table_idx.drop()
self.table.create()
self.table_idx.create()
def _teardown(self):
if self.engine.has_table(self.table.name):
self.table.drop()
if self.engine.has_table(self.table_idx.name):
self.table_idx.drop()
self.meta.clear()
super(TestAddDropColumn,self)._teardown()
def run_(self, create_column_func, drop_column_func, *col_p, **col_k):
col_name = 'data'
def assert_numcols(num_of_expected_cols):
# number of cols should be correct in table object and in database
self.refresh_table(self.table_name)
result = len(self.table.c)
            self.assertEqual(result, num_of_expected_cols)
if col_k.get('primary_key', None):
# new primary key: check its length too
result = len(self.table.primary_key)
self.assertEqual(result, num_of_expected_cols)
# we have 1 columns and there is no data column
assert_numcols(1)
self.assertTrue(getattr(self.table.c, 'data', None) is None)
if len(col_p) == 0:
col_p = [String(40)]
col = Column(col_name, *col_p, **col_k)
create_column_func(col)
assert_numcols(2)
# data column exists
self.assertTrue(self.table.c.data.type.length, 40)
col2 = self.table.c.data
drop_column_func(col2)
assert_numcols(1)
@fixture.usedb()
def test_undefined(self):
"""Add/drop columns not yet defined in the table"""
def add_func(col):
return create_column(col, self.table)
def drop_func(col):
return drop_column(col, self.table)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_defined(self):
"""Add/drop columns already defined in the table"""
def add_func(col):
self.meta.clear()
self.table = Table(self.table_name, self.meta,
Column('id', Integer, primary_key=True),
col,
)
return create_column(col)
def drop_func(col):
return drop_column(col)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_method_bound(self):
"""Add/drop columns via column methods; columns bound to a table
ie. no table parameter passed to function
"""
def add_func(col):
self.assertTrue(col.table is None, col.table)
self.table.append_column(col)
return col.create()
def drop_func(col):
#self.assertTrue(col.table is None,col.table)
#self.table.append_column(col)
return col.drop()
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_method_notbound(self):
"""Add/drop columns via column methods; columns not bound to a table"""
def add_func(col):
return col.create(self.table)
def drop_func(col):
return col.drop(self.table)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_tablemethod_obj(self):
"""Add/drop columns via table methods; by column object"""
def add_func(col):
return self.table.create_column(col)
def drop_func(col):
return self.table.drop_column(col)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_tablemethod_name(self):
"""Add/drop columns via table methods; by column name"""
def add_func(col):
# must be bound to table
self.table.append_column(col)
return self.table.create_column(col.name)
def drop_func(col):
# Not necessarily bound to table
return self.table.drop_column(col.name)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_byname(self):
"""Add/drop columns via functions; by table object and column name"""
def add_func(col):
self.table.append_column(col)
return create_column(col.name, self.table)
def drop_func(col):
return drop_column(col.name, self.table)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_drop_column_not_in_table(self):
"""Drop column by name"""
def add_func(col):
return self.table.create_column(col)
def drop_func(col):
if SQLA_07:
self.table._columns.remove(col)
else:
self.table.c.remove(col)
return self.table.drop_column(col.name)
self.run_(add_func, drop_func)
@fixture.usedb()
def test_fk(self):
"""Can create columns with foreign keys"""
# create FK's target
reftable = Table('tmp_ref', self.meta,
Column('id', Integer, primary_key=True),
)
if self.engine.has_table(reftable.name):
reftable.drop()
reftable.create()
# create column with fk
col = Column('data', Integer, ForeignKey(reftable.c.id, name='testfk'))
col.create(self.table)
# check if constraint is added
for cons in self.table.constraints:
if isinstance(cons, sqlalchemy.schema.ForeignKeyConstraint):
break
else:
self.fail('No constraint found')
# TODO: test on db level if constraints work
if SQLA_07:
self.assertEqual(reftable.c.id.name,
list(col.foreign_keys)[0].column.name)
else:
self.assertEqual(reftable.c.id.name,
col.foreign_keys[0].column.name)
if self.engine.name == 'mysql':
constraint.ForeignKeyConstraint([self.table.c.data],
[reftable.c.id],
name='testfk').drop()
col.drop(self.table)
if self.engine.has_table(reftable.name):
reftable.drop()
@fixture.usedb(not_supported='sqlite')
def test_pk(self):
"""Can create columns with primary key"""
col = Column('data', Integer, nullable=False)
self.assertRaises(exceptions.InvalidConstraintError,
col.create, self.table, primary_key_name=True)
col.create(self.table, primary_key_name='data_pkey')
# check if constraint was added (cannot test on objects)
self.table.insert(values={'data': 4}).execute()
try:
self.table.insert(values={'data': 4}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
col.drop()
@fixture.usedb(not_supported=['mysql'])
def test_check(self):
"""Can create columns with check constraint"""
col = Column('foo',
Integer,
sqlalchemy.schema.CheckConstraint('foo > 4'))
col.create(self.table)
# check if constraint was added (cannot test on objects)
self.table.insert(values={'foo': 5}).execute()
try:
self.table.insert(values={'foo': 3}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
col.drop()
@fixture.usedb()
def test_unique_constraint(self):
self.assertRaises(exceptions.InvalidConstraintError,
Column('data', Integer, unique=True).create, self.table)
col = Column('data', Integer)
col.create(self.table, unique_name='data_unique')
# check if constraint was added (cannot test on objects)
self.table.insert(values={'data': 5}).execute()
try:
self.table.insert(values={'data': 5}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
col.drop(self.table)
# TODO: remove already attached columns with uniques, pks, fks ..
@fixture.usedb(not_supported=['ibm_db_sa', 'postgresql'])
def test_drop_column_of_composite_index(self):
# NOTE(rpodolyaka): postgresql automatically drops a composite index
# if one of its columns is dropped
# NOTE(mriedem): DB2 does the same.
self.table_idx.c.b.drop()
reflected = Table(self.table_idx.name, MetaData(), autoload=True,
autoload_with=self.engine)
index = next(iter(reflected.indexes))
self.assertEquals(['a'], [c.name for c in index.columns])
@fixture.usedb()
def test_drop_all_columns_of_composite_index(self):
self.table_idx.c.a.drop()
self.table_idx.c.b.drop()
reflected = Table(self.table_idx.name, MetaData(), autoload=True,
autoload_with=self.engine)
self.assertEquals(0, len(reflected.indexes))
def _check_index(self,expected):
if 'mysql' in self.engine.name or 'postgres' in self.engine.name:
for index in tuple(
Table(self.table.name, MetaData(),
autoload=True, autoload_with=self.engine).indexes
):
if index.name=='ix_data':
break
self.assertEqual(expected,index.unique)
@fixture.usedb()
def test_index(self):
col = Column('data', Integer)
col.create(self.table, index_name='ix_data')
self._check_index(False)
col.drop()
@fixture.usedb()
def test_index_unique(self):
# shows how to create a unique index
col = Column('data', Integer)
col.create(self.table)
Index('ix_data', col, unique=True).create(bind=self.engine)
# check if index was added
self.table.insert(values={'data': 5}).execute()
try:
self.table.insert(values={'data': 5}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
self._check_index(True)
col.drop()
@fixture.usedb()
def test_server_defaults(self):
"""Can create columns with server_default values"""
col = Column('data', String(244), server_default='foobar')
col.create(self.table)
self.table.insert(values={'id': 10}).execute()
row = self._select_row()
self.assertEqual(u'foobar', row['data'])
col.drop()
@fixture.usedb()
def test_populate_default(self):
"""Test populate_default=True"""
def default():
return 'foobar'
col = Column('data', String(244), default=default)
col.create(self.table, populate_default=True)
self.table.insert(values={'id': 10}).execute()
row = self._select_row()
self.assertEqual(u'foobar', row['data'])
col.drop()
# TODO: test sequence
# TODO: test quoting
# TODO: test non-autoname constraints
@fixture.usedb()
def test_drop_doesnt_delete_other_indexes(self):
# add two indexed columns
self.table.drop()
self.meta.clear()
self.table = Table(
self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('d1', String(10), index=True),
Column('d2', String(10), index=True),
)
self.table.create()
# paranoid check
self.refresh_table()
self.assertEqual(
sorted([i.name for i in self.table.indexes]),
[u'ix_tmp_adddropcol_d1', u'ix_tmp_adddropcol_d2']
)
# delete one
self.table.c.d2.drop()
# ensure the other index is still there
self.refresh_table()
self.assertEqual(
sorted([i.name for i in self.table.indexes]),
[u'ix_tmp_adddropcol_d1']
)
def _actual_foreign_keys(self):
from sqlalchemy.schema import ForeignKeyConstraint
result = []
for cons in self.table.constraints:
if isinstance(cons,ForeignKeyConstraint):
col_names = []
for col_name in cons.columns:
if not isinstance(col_name,six.string_types):
col_name = col_name.name
col_names.append(col_name)
result.append(col_names)
result.sort()
return result
@fixture.usedb()
def test_drop_with_foreign_keys(self):
self.table.drop()
self.meta.clear()
# create FK's target
reftable = Table('tmp_ref', self.meta,
Column('id', Integer, primary_key=True),
)
if self.engine.has_table(reftable.name):
reftable.drop()
reftable.create()
# add a table with two foreign key columns
self.table = Table(
self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('r1', Integer, ForeignKey('tmp_ref.id', name='test_fk1')),
Column('r2', Integer, ForeignKey('tmp_ref.id', name='test_fk2')),
)
self.table.create()
# paranoid check
self.assertEqual([['r1'],['r2']],
self._actual_foreign_keys())
# delete one
if self.engine.name == 'mysql':
constraint.ForeignKeyConstraint([self.table.c.r2], [reftable.c.id],
name='test_fk2').drop()
self.table.c.r2.drop()
# check remaining foreign key is there
self.assertEqual([['r1']],
self._actual_foreign_keys())
@fixture.usedb()
def test_drop_with_complex_foreign_keys(self):
from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.schema import UniqueConstraint
self.table.drop()
self.meta.clear()
# NOTE(mriedem): DB2 does not currently support unique constraints
# on nullable columns, so the columns that are used to create the
# foreign keys here need to be non-nullable for testing with DB2
# to work.
# create FK's target
reftable = Table('tmp_ref', self.meta,
Column('id', Integer, primary_key=True),
Column('jd', Integer, nullable=False),
UniqueConstraint('id','jd')
)
if self.engine.has_table(reftable.name):
reftable.drop()
reftable.create()
# add a table with a complex foreign key constraint
self.table = Table(
self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('r1', Integer, nullable=False),
Column('r2', Integer, nullable=False),
ForeignKeyConstraint(['r1','r2'],
[reftable.c.id,reftable.c.jd],
name='test_fk')
)
self.table.create()
# paranoid check
self.assertEqual([['r1','r2']],
self._actual_foreign_keys())
# delete one
if self.engine.name == 'mysql':
constraint.ForeignKeyConstraint([self.table.c.r1, self.table.c.r2],
[reftable.c.id, reftable.c.jd],
name='test_fk').drop()
self.table.c.r2.drop()
# check the constraint is gone, since part of it
# is no longer there - if people hit this,
# they may be confused, maybe we should raise an error
# and insist that the constraint is deleted first, separately?
self.assertEqual([],
self._actual_foreign_keys())
class TestRename(fixture.DB):
"""Tests for table and index rename methods"""
level = fixture.DB.CONNECT
meta = MetaData()
def _setup(self, url):
super(TestRename, self)._setup(url)
self.meta.bind = self.engine
@fixture.usedb(not_supported='firebird')
def test_rename_table(self):
"""Tables can be renamed"""
c_name = 'col_1'
table_name1 = 'name_one'
table_name2 = 'name_two'
index_name1 = 'x' + table_name1
index_name2 = 'x' + table_name2
self.meta.clear()
self.column = Column(c_name, Integer)
self.table = Table(table_name1, self.meta, self.column)
self.index = Index(index_name1, self.column, unique=False)
if self.engine.has_table(self.table.name):
self.table.drop()
if self.engine.has_table(table_name2):
tmp = Table(table_name2, self.meta, autoload=True)
tmp.drop()
tmp.deregister()
del tmp
self.table.create()
def assert_table_name(expected, skip_object_check=False):
"""Refresh a table via autoload
SA has changed some since this test was written; we now need to do
meta.clear() upon reloading a table - clear all rather than a
select few. So, this works only if we're working with one table at
a time (else, others will vanish too).
"""
if not skip_object_check:
# Table object check
self.assertEqual(self.table.name,expected)
newname = self.table.name
else:
# we know the object's name isn't consistent: just assign it
newname = expected
# Table DB check
self.meta.clear()
self.table = Table(newname, self.meta, autoload=True)
self.assertEqual(self.table.name, expected)
def assert_index_name(expected, skip_object_check=False):
if not skip_object_check:
# Index object check
self.assertEqual(self.index.name, expected)
else:
# object is inconsistent
self.index.name = expected
# TODO: Index DB check
def add_table_to_meta(name):
# trigger the case where table_name2 needs to be
# removed from the metadata in ChangesetTable.deregister()
tmp = Table(name, self.meta, Column(c_name, Integer))
tmp.create()
tmp.drop()
try:
# Table renames
assert_table_name(table_name1)
add_table_to_meta(table_name2)
rename_table(self.table, table_name2)
assert_table_name(table_name2)
self.table.rename(table_name1)
assert_table_name(table_name1)
# test by just the string
rename_table(table_name1, table_name2, engine=self.engine)
assert_table_name(table_name2, True) # object not updated
# Index renames
if self.url.startswith('sqlite') or self.url.startswith('mysql'):
self.assertRaises(exceptions.NotSupportedError,
self.index.rename, index_name2)
else:
assert_index_name(index_name1)
rename_index(self.index, index_name2, engine=self.engine)
assert_index_name(index_name2)
self.index.rename(index_name1)
assert_index_name(index_name1)
# test by just the string
rename_index(index_name1, index_name2, engine=self.engine)
assert_index_name(index_name2, True)
finally:
if self.table.exists():
self.table.drop()
class TestColumnChange(fixture.DB):
level = fixture.DB.CONNECT
table_name = 'tmp_colchange'
def _setup(self, url):
super(TestColumnChange, self)._setup(url)
self.meta = MetaData(self.engine)
self.table = Table(self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('data', String(40), server_default=DefaultClause("tluafed"),
nullable=True),
)
if self.table.exists():
self.table.drop()
try:
self.table.create()
except sqlalchemy.exc.SQLError:
# SQLite: database schema has changed
if not self.url.startswith('sqlite://'):
raise
def _teardown(self):
if self.table.exists():
try:
self.table.drop(self.engine)
except sqlalchemy.exc.SQLError:
# SQLite: database schema has changed
if not self.url.startswith('sqlite://'):
raise
super(TestColumnChange, self)._teardown()
@fixture.usedb()
def test_rename(self):
"""Can rename a column"""
def num_rows(col, content):
return len(list(self.table.select(col == content).execute()))
# Table content should be preserved in changed columns
content = "fgsfds"
self.engine.execute(self.table.insert(), data=content, id=42)
self.assertEqual(num_rows(self.table.c.data, content), 1)
# ...as a function, given a column object and the new name
alter_column('data', name='data2', table=self.table)
self.refresh_table()
alter_column(self.table.c.data2, name='atad')
self.refresh_table(self.table.name)
self.assertTrue('data' not in self.table.c.keys())
self.assertTrue('atad' in self.table.c.keys())
self.assertEqual(num_rows(self.table.c.atad, content), 1)
# ...as a method, given a new name
self.table.c.atad.alter(name='data')
self.refresh_table(self.table.name)
self.assertTrue('atad' not in self.table.c.keys())
self.table.c.data # Should not raise exception
self.assertEqual(num_rows(self.table.c.data, content), 1)
# ...as a function, given a new object
alter_column(self.table.c.data,
name = 'atad', type=String(40),
server_default=self.table.c.data.server_default)
self.refresh_table(self.table.name)
self.assertTrue('data' not in self.table.c.keys())
self.table.c.atad # Should not raise exception
self.assertEqual(num_rows(self.table.c.atad, content), 1)
# ...as a method, given a new object
self.table.c.atad.alter(
name='data',type=String(40),
server_default=self.table.c.atad.server_default
)
self.refresh_table(self.table.name)
self.assertTrue('atad' not in self.table.c.keys())
self.table.c.data # Should not raise exception
self.assertEqual(num_rows(self.table.c.data,content), 1)
@fixture.usedb()
def test_type(self):
# Test we can change a column's type
# Just the new type
self.table.c.data.alter(type=String(43))
self.refresh_table(self.table.name)
self.assertTrue(isinstance(self.table.c.data.type, String))
self.assertEqual(self.table.c.data.type.length, 43)
# Different type
self.assertTrue(isinstance(self.table.c.id.type, Integer))
self.assertEqual(self.table.c.id.nullable, False)
if not self.engine.name == 'firebird':
self.table.c.id.alter(type=String(20))
self.assertEqual(self.table.c.id.nullable, False)
self.refresh_table(self.table.name)
self.assertTrue(isinstance(self.table.c.id.type, String))
@fixture.usedb()
def test_default(self):
"""Can change a column's server_default value (DefaultClauses only)
Only DefaultClauses are changed here: others are managed by the
application / by SA
"""
self.assertEqual(self.table.c.data.server_default.arg, 'tluafed')
# Just the new default
default = 'my_default'
self.table.c.data.alter(server_default=DefaultClause(default))
self.refresh_table(self.table.name)
#self.assertEqual(self.table.c.data.server_default.arg,default)
# TextClause returned by autoload
self.assertTrue(default in str(self.table.c.data.server_default.arg))
self.engine.execute(self.table.insert(), id=12)
row = self._select_row()
self.assertEqual(row['data'], default)
# Column object
default = 'your_default'
self.table.c.data.alter(type=String(40), server_default=DefaultClause(default))
self.refresh_table(self.table.name)
self.assertTrue(default in str(self.table.c.data.server_default.arg))
# Drop/remove default
self.table.c.data.alter(server_default=None)
self.assertEqual(self.table.c.data.server_default, None)
self.refresh_table(self.table.name)
# server_default isn't necessarily None for Oracle
#self.assertTrue(self.table.c.data.server_default is None,self.table.c.data.server_default)
self.engine.execute(self.table.insert(), id=11)
row = self.table.select(self.table.c.id == 11).execution_options(autocommit=True).execute().fetchone()
self.assertTrue(row['data'] is None, row['data'])
@fixture.usedb(not_supported='firebird')
def test_null(self):
"""Can change a column's null constraint"""
self.assertEqual(self.table.c.data.nullable, True)
# Full column
self.table.c.data.alter(type=String(40), nullable=False)
self.table.nullable = None
self.refresh_table(self.table.name)
self.assertEqual(self.table.c.data.nullable, False)
# Just the new status
self.table.c.data.alter(nullable=True)
self.refresh_table(self.table.name)
self.assertEqual(self.table.c.data.nullable, True)
@fixture.usedb()
def test_alter_deprecated(self):
try:
            # py 2.4 compatibility :-/
cw = catch_warnings(record=True)
w = cw.__enter__()
warnings.simplefilter("always")
self.table.c.data.alter(Column('data', String(100)))
self.assertEqual(len(w),1)
self.assertTrue(issubclass(w[-1].category,
MigrateDeprecationWarning))
self.assertEqual(
'Passing a Column object to alter_column is deprecated. '
'Just pass in keyword parameters instead.',
str(w[-1].message))
finally:
cw.__exit__()
@fixture.usedb()
def test_alter_returns_delta(self):
"""Test if alter constructs return delta"""
delta = self.table.c.data.alter(type=String(100))
self.assertTrue('type' in delta)
@fixture.usedb()
def test_alter_all(self):
"""Tests all alter changes at one time"""
# test for each db separately
# since currently some dont support everything
# test pre settings
self.assertEqual(self.table.c.data.nullable, True)
self.assertEqual(self.table.c.data.server_default.arg, 'tluafed')
self.assertEqual(self.table.c.data.name, 'data')
self.assertTrue(isinstance(self.table.c.data.type, String))
self.assertTrue(self.table.c.data.type.length, 40)
kw = dict(nullable=False,
server_default='foobar',
name='data_new',
type=String(50))
if self.engine.name == 'firebird':
del kw['nullable']
self.table.c.data.alter(**kw)
# test altered objects
self.assertEqual(self.table.c.data.server_default.arg, 'foobar')
if not self.engine.name == 'firebird':
self.assertEqual(self.table.c.data.nullable, False)
self.assertEqual(self.table.c.data.name, 'data_new')
self.assertEqual(self.table.c.data.type.length, 50)
self.refresh_table(self.table.name)
# test post settings
if not self.engine.name == 'firebird':
self.assertEqual(self.table.c.data_new.nullable, False)
self.assertEqual(self.table.c.data_new.name, 'data_new')
self.assertTrue(isinstance(self.table.c.data_new.type, String))
self.assertTrue(self.table.c.data_new.type.length, 50)
# insert data and assert default
self.table.insert(values={'id': 10}).execute()
row = self._select_row()
self.assertEqual(u'foobar', row['data_new'])
class TestColumnDelta(fixture.DB):
"""Tests ColumnDelta class"""
level = fixture.DB.CONNECT
table_name = 'tmp_coldelta'
table_int = 0
def _setup(self, url):
super(TestColumnDelta, self)._setup(url)
self.meta = MetaData()
self.table = Table(self.table_name, self.meta,
Column('ids', String(10)),
)
self.meta.bind = self.engine
if self.engine.has_table(self.table.name):
self.table.drop()
self.table.create()
def _teardown(self):
if self.engine.has_table(self.table.name):
self.table.drop()
self.meta.clear()
super(TestColumnDelta,self)._teardown()
def mkcol(self, name='id', type=String, *p, **k):
return Column(name, type, *p, **k)
def verify(self, expected, original, *p, **k):
self.delta = ColumnDelta(original, *p, **k)
result = list(self.delta.keys())
result.sort()
self.assertEqual(expected, result)
return self.delta
def test_deltas_two_columns(self):
"""Testing ColumnDelta with two columns"""
col_orig = self.mkcol(primary_key=True)
col_new = self.mkcol(name='ids', primary_key=True)
self.verify([], col_orig, col_orig)
self.verify(['name'], col_orig, col_orig, 'ids')
self.verify(['name'], col_orig, col_orig, name='ids')
self.verify(['name'], col_orig, col_new)
self.verify(['name', 'type'], col_orig, col_new, type=String)
# Type comparisons
self.verify([], self.mkcol(type=String), self.mkcol(type=String))
self.verify(['type'], self.mkcol(type=String), self.mkcol(type=Integer))
self.verify(['type'], self.mkcol(type=String), self.mkcol(type=String(42)))
self.verify([], self.mkcol(type=String(42)), self.mkcol(type=String(42)))
self.verify(['type'], self.mkcol(type=String(24)), self.mkcol(type=String(42)))
self.verify(['type'], self.mkcol(type=String(24)), self.mkcol(type=Text(24)))
# Other comparisons
self.verify(['primary_key'], self.mkcol(nullable=False), self.mkcol(primary_key=True))
# PK implies nullable=False
self.verify(['nullable', 'primary_key'], self.mkcol(nullable=True), self.mkcol(primary_key=True))
self.verify([], self.mkcol(primary_key=True), self.mkcol(primary_key=True))
self.verify(['nullable'], self.mkcol(nullable=True), self.mkcol(nullable=False))
self.verify([], self.mkcol(nullable=True), self.mkcol(nullable=True))
self.verify([], self.mkcol(server_default=None), self.mkcol(server_default=None))
self.verify([], self.mkcol(server_default='42'), self.mkcol(server_default='42'))
# test server default
delta = self.verify(['server_default'], self.mkcol(), self.mkcol('id', String, DefaultClause('foobar')))
self.assertEqual(delta['server_default'].arg, 'foobar')
self.verify([], self.mkcol(server_default='foobar'), self.mkcol('id', String, DefaultClause('foobar')))
self.verify(['type'], self.mkcol(server_default='foobar'), self.mkcol('id', Text, DefaultClause('foobar')))
col = self.mkcol(server_default='foobar')
self.verify(['type'], col, self.mkcol('id', Text, DefaultClause('foobar')), alter_metadata=True)
self.assertTrue(isinstance(col.type, Text))
col = self.mkcol()
self.verify(['name', 'server_default', 'type'], col, self.mkcol('beep', Text, DefaultClause('foobar')),
alter_metadata=True)
self.assertTrue(isinstance(col.type, Text))
self.assertEqual(col.name, 'beep')
self.assertEqual(col.server_default.arg, 'foobar')
@fixture.usedb()
def test_deltas_zero_columns(self):
"""Testing ColumnDelta with zero columns"""
self.verify(['name'], 'ids', table=self.table, name='hey')
# test reflection
self.verify(['type'], 'ids', table=self.table.name, type=String(80), engine=self.engine)
self.verify(['type'], 'ids', table=self.table.name, type=String(80), metadata=self.meta)
self.meta.clear()
delta = self.verify(['type'], 'ids', table=self.table.name, type=String(80), metadata=self.meta,
alter_metadata=True)
self.assertTrue(self.table.name in self.meta)
self.assertEqual(delta.result_column.type.length, 80)
self.assertEqual(self.meta.tables.get(self.table.name).c.ids.type.length, 80)
# test defaults
self.meta.clear()
self.verify(['server_default'], 'ids', table=self.table.name, server_default='foobar',
metadata=self.meta,
alter_metadata=True)
        self.assertEqual(self.meta.tables.get(self.table.name).c.ids.server_default.arg, 'foobar')
# test missing parameters
self.assertRaises(ValueError, ColumnDelta, table=self.table.name)
self.assertRaises(ValueError, ColumnDelta, 'ids', table=self.table.name, alter_metadata=True)
self.assertRaises(ValueError, ColumnDelta, 'ids', table=self.table.name, alter_metadata=False)
def test_deltas_one_column(self):
"""Testing ColumnDelta with one column"""
col_orig = self.mkcol(primary_key=True)
self.verify([], col_orig)
self.verify(['name'], col_orig, 'ids')
# Parameters are always executed, even if they're 'unchanged'
# (We can't assume given column is up-to-date)
self.verify(['name', 'primary_key', 'type'], col_orig, 'id', Integer, primary_key=True)
self.verify(['name', 'primary_key', 'type'], col_orig, name='id', type=Integer, primary_key=True)
# Change name, given an up-to-date definition and the current name
delta = self.verify(['name'], col_orig, name='blah')
self.assertEqual(delta.get('name'), 'blah')
self.assertEqual(delta.current_name, 'id')
col_orig = self.mkcol(primary_key=True)
self.verify(['name', 'type'], col_orig, name='id12', type=Text, alter_metadata=True)
self.assertTrue(isinstance(col_orig.type, Text))
self.assertEqual(col_orig.name, 'id12')
# test server default
col_orig = self.mkcol(primary_key=True)
delta = self.verify(['server_default'], col_orig, DefaultClause('foobar'))
self.assertEqual(delta['server_default'].arg, 'foobar')
delta = self.verify(['server_default'], col_orig, server_default=DefaultClause('foobar'))
self.assertEqual(delta['server_default'].arg, 'foobar')
# no change
col_orig = self.mkcol(server_default=DefaultClause('foobar'))
delta = self.verify(['type'], col_orig, DefaultClause('foobar'), type=PickleType)
self.assertTrue(isinstance(delta.result_column.type, PickleType))
# TODO: test server on update
# TODO: test bind metadata
| mit | -1,850,656,999,979,722,800 | 5,425,766,409,446,691,000 | 36.992731 | 115 | 0.586246 | false |
ilay09/keystone | keystone/common/cache/_context_cache.py | 5 | 3758 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A dogpile.cache proxy that caches objects in the request local cache."""
from dogpile.cache import api
from dogpile.cache import proxy
from oslo_context import context as oslo_context
from oslo_serialization import msgpackutils
# Register our new handler.
_registry = msgpackutils.default_registry
def _register_model_handler(handler_class):
"""Register a new model handler."""
_registry.frozen = False
_registry.register(handler_class(registry=_registry))
_registry.frozen = True
class _ResponseCacheProxy(proxy.ProxyBackend):
__key_pfx = '_request_cache_%s'
def _get_request_context(self):
# Return the current context or a new/empty context.
return oslo_context.get_current() or oslo_context.RequestContext()
def _get_request_key(self, key):
return self.__key_pfx % key
def _set_local_cache(self, key, value):
# Set a serialized version of the returned value in local cache for
# subsequent calls to the memoized method.
ctx = self._get_request_context()
serialize = {'payload': value.payload, 'metadata': value.metadata}
setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
def _get_local_cache(self, key):
# Return the version from our local request cache if it exists.
ctx = self._get_request_context()
try:
value = getattr(ctx, self._get_request_key(key))
except AttributeError:
return api.NO_VALUE
value = msgpackutils.loads(value)
return api.CachedValue(payload=value['payload'],
metadata=value['metadata'])
def _delete_local_cache(self, key):
# On invalidate/delete remove the value from the local request cache
ctx = self._get_request_context()
try:
delattr(ctx, self._get_request_key(key))
except AttributeError: # nosec
# NOTE(morganfainberg): We will simply pass here, this value has
# not been cached locally in the request.
pass
def get(self, key):
value = self._get_local_cache(key)
if value is api.NO_VALUE:
value = self.proxied.get(key)
if value is not api.NO_VALUE:
self._set_local_cache(key, value)
return value
def set(self, key, value):
self._set_local_cache(key, value)
self.proxied.set(key, value)
def delete(self, key):
self._delete_local_cache(key)
self.proxied.delete(key)
def get_multi(self, keys):
values = {}
for key in keys:
v = self._get_local_cache(key)
if v is not api.NO_VALUE:
values[key] = v
query_keys = set(keys).difference(set(values.keys()))
values.update(dict(
zip(query_keys, self.proxied.get_multi(query_keys))))
return [values[k] for k in keys]
def set_multi(self, mapping):
for k, v in mapping.items():
self._set_local_cache(k, v)
self.proxied.set_multi(mapping)
def delete_multi(self, keys):
for k in keys:
self._delete_local_cache(k)
self.proxied.delete_multi(keys)
| apache-2.0 | -2,222,878,581,380,671,700 | 2,925,390,118,867,583,500 | 34.45283 | 79 | 0.639702 | false |
0xCCD/mitro | browser-ext/third_party/firefox-addon-sdk/python-lib/plural-rules-generator.py | 36 | 6116 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Program used to generate /packages/api-utils/lib/l10n/plural-rules.js
# Fetch unicode.org data in order to build functions specific to each language
# that will return for a given integer, its plural form name.
# Plural form names are: zero, one, two, few, many, other.
#
# More information here:
# http://unicode.org/repos/cldr-tmp/trunk/diff/supplemental/language_plural_rules.html
# http://cldr.unicode.org/index/cldr-spec/plural-rules
# Usage:
# $ python plural-rules-generator.py > ../packages/api-utils/lib/l10n/plural-rules.js
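# Worked example (hypothetical rule, not fetched data): the unicode.org
# pseudo-language condition
#     n mod 10 is 1 and n mod 100 is not 11
# is lower-cased and rewritten by parseCondition()/computeRules() into the
# JavaScript test
#     (n % 10) == 1 && (n % 100) != 11
# which then becomes one `if (...) return "one";` branch of a generated rule.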
import urllib2
import xml.dom.minidom
import json
import re
PRINT_CONDITIONS_IN_COMMENTS = False
UNICODE_ORG_XML_URL = "http://unicode.org/repos/cldr/trunk/common/supplemental/plurals.xml"
CONDITION_RE = r'n( mod \d+)? (is|in|within|(not in))( not)? ([^\s]+)'
# For a given regexp.MatchObject `g` for `CONDITION_RE`,
# returns the equivalent JS piece of code
# i.e. maps pseudo conditional language from unicode.org XML to JS code
def parseCondition(g):
lvalue = "n"
if g.group(1):
lvalue = "(n %% %d)" % int(g.group(1).replace("mod ", ""))
operator = g.group(2)
if g.group(4):
operator += " not"
rvalue = g.group(5)
if operator == "is":
return "%s == %s" % (lvalue, rvalue)
if operator == "is not":
return "%s != %s" % (lvalue, rvalue)
# "in", "within" or "not in" case:
notPrefix = ""
if operator == "not in":
notPrefix = "!"
# `rvalue` is a comma seperated list of either:
# - numbers: 42
# - ranges: 42..72
sections = rvalue.split(',')
if ".." not in rvalue:
# If we don't have range, but only a list of integer,
# we can simplify the generated code by using `isIn`
# n in 1,3,6,42
return "%sisIn(%s, [%s])" % (notPrefix, lvalue, ", ".join(sections))
# n in 1..42
# n in 1..3,42
subCondition = []
integers = []
for sub in sections:
if ".." in sub:
left, right = sub.split("..")
subCondition.append("isBetween(%s, %d, %d)" % (
lvalue,
int(left),
int(right)
))
else:
integers.append(int(sub))
if len(integers) > 1:
subCondition.append("isIn(%s, [%s])" % (lvalue, ", ".join(integers)))
elif len(integers) == 1:
subCondition.append("(%s == %s)" % (lvalue, integers[0]))
return "%s(%s)" % (notPrefix, " || ".join(subCondition))
def computeRules():
# Fetch plural rules data directly from unicode.org website:
url = UNICODE_ORG_XML_URL
f = urllib2.urlopen(url)
doc = xml.dom.minidom.parse(f)
# Read XML document and extract locale to rules mapping
localesMapping = {}
algorithms = {}
for index,pluralRules in enumerate(doc.getElementsByTagName("pluralRules")):
if not index in algorithms:
algorithms[index] = {}
for locale in pluralRules.getAttribute("locales").split():
localesMapping[locale] = index
for rule in pluralRules.childNodes:
if rule.nodeType != rule.ELEMENT_NODE or rule.tagName != "pluralRule":
continue
pluralForm = rule.getAttribute("count")
algorithm = rule.firstChild.nodeValue
algorithms[index][pluralForm] = algorithm
# Go through all rules and compute a Javascript code for each of them
rules = {}
for index,rule in algorithms.iteritems():
lines = []
for pluralForm in rule:
condition = rule[pluralForm]
originalCondition = str(condition)
# Convert pseudo language to JS code
condition = rule[pluralForm].lower()
condition = re.sub(CONDITION_RE, parseCondition, condition)
condition = re.sub(r'or', "||", condition)
condition = re.sub(r'and', "&&", condition)
# Prints original condition in unicode.org pseudo language
if PRINT_CONDITIONS_IN_COMMENTS:
lines.append( '// %s' % originalCondition )
lines.append( 'if (%s)' % condition )
lines.append( ' return "%s";' % pluralForm )
rules[index] = "\n ".join(lines)
return localesMapping, rules
localesMapping, rules = computeRules()
rulesLines = []
for index in rules:
lines = rules[index]
rulesLines.append('"%d": function (n) {' % index)
rulesLines.append(' %s' % lines)
rulesLines.append(' return "other"')
rulesLines.append('},')
print """/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// This file is automatically generated with /python-lib/plural-rules-generator.py
// Fetching data from: %s
// Mapping of short locale name == to == > rule index in following list
const LOCALES_TO_RULES = %s;
// Utility functions for plural rules methods
function isIn(n, list) list.indexOf(n) !== -1;
function isBetween(n, start, end) start <= n && n <= end;
// List of all plural rules methods, that maps an integer to the plural form name to use
const RULES = {
%s
};
/**
* Return a function that gives the plural form name for a given integer
* for the specified `locale`
* let fun = getRulesForLocale('en');
* fun(1) -> 'one'
* fun(0) -> 'other'
* fun(1000) -> 'other'
*/
exports.getRulesForLocale = function getRulesForLocale(locale) {
let index = LOCALES_TO_RULES[locale];
if (!(index in RULES)) {
console.warn('Plural form unknown for locale "' + locale + '"');
return function () { return "other"; };
}
return RULES[index];
}
""" % (UNICODE_ORG_XML_URL,
json.dumps(localesMapping, sort_keys=True, indent=2),
"\n ".join(rulesLines))
| gpl-3.0 | -6,138,410,769,320,051,000 | 2,464,282,570,854,367,000 | 33.75 | 91 | 0.60775 | false |
akretion/odoo | odoo/addons/base/models/ir_module.py | 4 | 42225 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
from collections import defaultdict
from decorator import decorator
from operator import attrgetter
import importlib
import io
import logging
import os
import shutil
import tempfile
import zipfile
import requests
import werkzeug.urls
from odoo.tools import pycompat
from docutils import nodes
from docutils.core import publish_string
from docutils.transforms import Transform, writer_aux
from docutils.writers.html4css1 import Writer
import lxml.html
import psycopg2
import odoo
from odoo import api, fields, models, modules, tools, _
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
from odoo.exceptions import AccessDenied, UserError
from odoo.osv import expression
from odoo.tools.parse_version import parse_version
from odoo.tools.misc import topological_sort
from odoo.http import request
_logger = logging.getLogger(__name__)
ACTION_DICT = {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.upgrade',
'target': 'new',
'type': 'ir.actions.act_window',
}
def backup(path, raise_exception=True):
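    # Behaviour sketch (illustrative paths only): backup('/opt/odoo/server')
    # moves that directory to the first free sibling such as
    # '/opt/odoo/server~1' and returns the new path; a missing path either
    # raises OSError or returns None, depending on raise_exception.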
path = os.path.normpath(path)
if not os.path.exists(path):
if not raise_exception:
return None
raise OSError('path does not exists')
cnt = 1
while True:
bck = '%s~%d' % (path, cnt)
if not os.path.exists(bck):
shutil.move(path, bck)
return bck
cnt += 1
def assert_log_admin_access(method):
"""Decorator checking that the calling user is an administrator, and logging the call.
Raises an AccessDenied error if the user does not have administrator privileges, according
to `user._is_admin()`.
"""
def check_and_log(method, self, *args, **kwargs):
user = self.env.user
origin = request.httprequest.remote_addr if request else 'n/a'
log_data = (method.__name__, self.sudo().mapped('name'), user.login, user.id, origin)
if not self.env.user._is_admin():
_logger.warning('DENY access to module.%s on %s to user %s ID #%s via %s', *log_data)
raise AccessDenied()
_logger.info('ALLOW access to module.%s on %s to user %s #%s via %s', *log_data)
return method(self, *args, **kwargs)
return decorator(check_and_log, method)
class ModuleCategory(models.Model):
_name = "ir.module.category"
_description = "Application"
_order = 'name'
@api.depends('module_ids')
def _compute_module_nr(self):
cr = self._cr
cr.execute('SELECT category_id, COUNT(*) \
FROM ir_module_module \
WHERE category_id IN %(ids)s \
OR category_id IN (SELECT id \
FROM ir_module_category \
WHERE parent_id IN %(ids)s) \
GROUP BY category_id', {'ids': tuple(self.ids)}
)
result = dict(cr.fetchall())
for cat in self.filtered('id'):
cr.execute('SELECT id FROM ir_module_category WHERE parent_id=%s', (cat.id,))
cat.module_nr = sum([result.get(c, 0) for (c,) in cr.fetchall()], result.get(cat.id, 0))
name = fields.Char(string='Name', required=True, translate=True, index=True)
parent_id = fields.Many2one('ir.module.category', string='Parent Application', index=True)
child_ids = fields.One2many('ir.module.category', 'parent_id', string='Child Applications')
module_nr = fields.Integer(string='Number of Apps', compute='_compute_module_nr')
module_ids = fields.One2many('ir.module.module', 'category_id', string='Modules')
description = fields.Text(string='Description', translate=True)
sequence = fields.Integer(string='Sequence')
visible = fields.Boolean(string='Visible', default=True)
exclusive = fields.Boolean(string='Exclusive')
xml_id = fields.Char(string='External ID', compute='_compute_xml_id')
def _compute_xml_id(self):
xml_ids = defaultdict(list)
domain = [('model', '=', self._name), ('res_id', 'in', self.ids)]
for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id']):
xml_ids[data['res_id']].append("%s.%s" % (data['module'], data['name']))
for cat in self:
cat.xml_id = xml_ids.get(cat.id, [''])[0]
class MyFilterMessages(Transform):
"""
Custom docutils transform to remove `system message` for a document and
generate warnings.
(The standard filter removes them based on some `report_level` passed in
the `settings_override` dictionary, but if we use it, we can't see them
and generate warnings.)
"""
default_priority = 870
def apply(self):
for node in self.document.traverse(nodes.system_message):
_logger.warning("docutils' system message present: %s", str(node))
node.parent.remove(node)
class MyWriter(Writer):
"""
Custom docutils html4ccs1 writer that doesn't add the warnings to the
output document.
"""
def get_transforms(self):
return [MyFilterMessages, writer_aux.Admonitions]
STATES = [
('uninstallable', 'Uninstallable'),
('uninstalled', 'Not Installed'),
('installed', 'Installed'),
('to upgrade', 'To be upgraded'),
('to remove', 'To be removed'),
('to install', 'To be installed'),
]
class Module(models.Model):
_name = "ir.module.module"
_rec_name = "shortdesc"
_description = "Module"
_order = 'sequence,name'
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
res = super(Module, self).fields_view_get(view_id, view_type, toolbar=toolbar, submenu=False)
if view_type == 'form' and res.get('toolbar',False):
install_id = self.env.ref('base.action_server_module_immediate_install').id
action = [rec for rec in res['toolbar']['action'] if rec.get('id', False) != install_id]
res['toolbar'] = {'action': action}
return res
@classmethod
def get_module_info(cls, name):
try:
return modules.load_information_from_description_file(name)
except Exception:
_logger.debug('Error when trying to fetch information for module %s', name, exc_info=True)
return {}
@api.depends('name', 'description')
def _get_desc(self):
for module in self:
path = modules.get_module_resource(module.name, 'static/description/index.html')
if path:
with tools.file_open(path, 'rb') as desc_file:
doc = desc_file.read()
html = lxml.html.document_fromstring(doc)
for element, attribute, link, pos in html.iterlinks():
if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'):
element.set('src', "/%s/static/description/%s" % (module.name, element.get('src')))
module.description_html = tools.html_sanitize(lxml.html.tostring(html))
else:
overrides = {
'embed_stylesheet': False,
'doctitle_xform': False,
'output_encoding': 'unicode',
'xml_declaration': False,
'file_insertion_enabled': False,
}
output = publish_string(source=module.description if not module.application and module.description else '', settings_overrides=overrides, writer=MyWriter())
module.description_html = tools.html_sanitize(output)
@api.depends('name')
def _get_latest_version(self):
default_version = modules.adapt_version('1.0')
for module in self:
module.installed_version = self.get_module_info(module.name).get('version', default_version)
@api.depends('name', 'state')
def _get_views(self):
IrModelData = self.env['ir.model.data'].with_context(active_test=True)
dmodels = ['ir.ui.view', 'ir.actions.report', 'ir.ui.menu']
for module in self:
# Skip uninstalled modules below, no data to find anyway.
if module.state not in ('installed', 'to upgrade', 'to remove'):
module.views_by_module = ""
module.reports_by_module = ""
module.menus_by_module = ""
continue
# then, search and group ir.model.data records
imd_models = defaultdict(list)
imd_domain = [('module', '=', module.name), ('model', 'in', tuple(dmodels))]
for data in IrModelData.sudo().search(imd_domain):
imd_models[data.model].append(data.res_id)
def browse(model):
# as this method is called before the module update, some xmlid
# may be invalid at this stage; explictly filter records before
# reading them
return self.env[model].browse(imd_models[model]).exists()
def format_view(v):
return '%s%s (%s)' % (v.inherit_id and '* INHERIT ' or '', v.name, v.type)
module.views_by_module = "\n".join(sorted(format_view(v) for v in browse('ir.ui.view')))
module.reports_by_module = "\n".join(sorted(r.name for r in browse('ir.actions.report')))
module.menus_by_module = "\n".join(sorted(m.complete_name for m in browse('ir.ui.menu')))
@api.depends('icon')
def _get_icon_image(self):
for module in self:
module.icon_image = ''
if module.icon:
path_parts = module.icon.split('/')
path = modules.get_module_resource(path_parts[1], *path_parts[2:])
else:
path = modules.module.get_module_icon(module.name)
if path:
with tools.file_open(path, 'rb') as image_file:
module.icon_image = base64.b64encode(image_file.read())
name = fields.Char('Technical Name', readonly=True, required=True, index=True)
category_id = fields.Many2one('ir.module.category', string='Category', readonly=True, index=True)
shortdesc = fields.Char('Module Name', readonly=True, translate=True)
summary = fields.Char('Summary', readonly=True, translate=True)
description = fields.Text('Description', readonly=True, translate=True)
description_html = fields.Html('Description HTML', compute='_get_desc')
author = fields.Char("Author", readonly=True)
maintainer = fields.Char('Maintainer', readonly=True)
contributors = fields.Text('Contributors', readonly=True)
website = fields.Char("Website", readonly=True)
# attention: Incorrect field names !!
# installed_version refers the latest version (the one on disk)
# latest_version refers the installed version (the one in database)
# published_version refers the version available on the repository
installed_version = fields.Char('Latest Version', compute='_get_latest_version')
latest_version = fields.Char('Installed Version', readonly=True)
published_version = fields.Char('Published Version', readonly=True)
url = fields.Char('URL', readonly=True)
sequence = fields.Integer('Sequence', default=100)
dependencies_id = fields.One2many('ir.module.module.dependency', 'module_id',
string='Dependencies', readonly=True)
exclusion_ids = fields.One2many('ir.module.module.exclusion', 'module_id',
string='Exclusions', readonly=True)
auto_install = fields.Boolean('Automatic Installation',
help='An auto-installable module is automatically installed by the '
'system when all its dependencies are satisfied. '
'If the module has no dependency, it is always installed.')
state = fields.Selection(STATES, string='Status', default='uninstallable', readonly=True, index=True)
demo = fields.Boolean('Demo Data', default=False, readonly=True)
license = fields.Selection([
('GPL-2', 'GPL Version 2'),
('GPL-2 or any later version', 'GPL-2 or later version'),
('GPL-3', 'GPL Version 3'),
('GPL-3 or any later version', 'GPL-3 or later version'),
('AGPL-3', 'Affero GPL-3'),
('LGPL-3', 'LGPL Version 3'),
('Other OSI approved licence', 'Other OSI Approved Licence'),
('OEEL-1', 'Odoo Enterprise Edition License v1.0'),
('OPL-1', 'Odoo Proprietary License v1.0'),
('Other proprietary', 'Other Proprietary')
], string='License', default='LGPL-3', readonly=True)
menus_by_module = fields.Text(string='Menus', compute='_get_views', store=True)
reports_by_module = fields.Text(string='Reports', compute='_get_views', store=True)
views_by_module = fields.Text(string='Views', compute='_get_views', store=True)
application = fields.Boolean('Application', readonly=True)
icon = fields.Char('Icon URL')
icon_image = fields.Binary(string='Icon', compute='_get_icon_image')
to_buy = fields.Boolean('Odoo Enterprise Module', default=False)
_sql_constraints = [
('name_uniq', 'UNIQUE (name)', 'The name of the module must be unique!'),
]
@api.multi
def unlink(self):
if not self:
return True
for module in self:
if module.state in ('installed', 'to upgrade', 'to remove', 'to install'):
raise UserError(_('You are trying to remove a module that is installed or will be installed.'))
self.clear_caches()
return super(Module, self).unlink()
@staticmethod
def _check_external_dependencies(terp):
depends = terp.get('external_dependencies')
if not depends:
return
for pydep in depends.get('python', []):
try:
importlib.import_module(pydep)
except ImportError:
raise ImportError('No module named %s' % (pydep,))
for binary in depends.get('bin', []):
try:
tools.find_in_path(binary)
except IOError:
raise Exception('Unable to find %r in path' % (binary,))
@classmethod
def check_external_dependencies(cls, module_name, newstate='to install'):
terp = cls.get_module_info(module_name)
try:
cls._check_external_dependencies(terp)
except Exception as e:
if newstate == 'to install':
msg = _('Unable to install module "%s" because an external dependency is not met: %s')
elif newstate == 'to upgrade':
msg = _('Unable to upgrade module "%s" because an external dependency is not met: %s')
else:
msg = _('Unable to process module "%s" because an external dependency is not met: %s')
raise UserError(msg % (module_name, e.args[0]))
@api.multi
def _state_update(self, newstate, states_to_update, level=100):
if level < 1:
raise UserError(_('Recursion error in modules dependencies !'))
# whether some modules are installed with demo data
demo = False
for module in self:
# determine dependency modules to update/others
update_mods, ready_mods = self.browse(), self.browse()
for dep in module.dependencies_id:
if dep.state == 'unknown':
raise UserError(_("You try to install module '%s' that depends on module '%s'.\nBut the latter module is not available in your system.") % (module.name, dep.name,))
if dep.depend_id.state == newstate:
ready_mods += dep.depend_id
else:
update_mods += dep.depend_id
# update dependency modules that require it, and determine demo for module
update_demo = update_mods._state_update(newstate, states_to_update, level=level-1)
module_demo = module.demo or update_demo or any(mod.demo for mod in ready_mods)
demo = demo or module_demo
if module.state in states_to_update:
# check dependencies and update module itself
self.check_external_dependencies(module.name, newstate)
module.write({'state': newstate, 'demo': module_demo})
return demo
@assert_log_admin_access
@api.multi
def button_install(self):
# domain to select auto-installable (but not yet installed) modules
auto_domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)]
# determine whether an auto-install module must be installed:
# - all its dependencies are installed or to be installed,
# - at least one dependency is 'to install'
install_states = frozenset(('installed', 'to install', 'to upgrade'))
def must_install(module):
states = set(dep.state for dep in module.dependencies_id)
return states <= install_states and 'to install' in states
modules = self
while modules:
# Mark the given modules and their dependencies to be installed.
modules._state_update('to install', ['uninstalled'])
# Determine which auto-installable modules must be installed.
modules = self.search(auto_domain).filtered(must_install)
# the modules that are installed/to install/to upgrade
install_mods = self.search([('state', 'in', list(install_states))])
# check individual exclusions
install_names = {module.name for module in install_mods}
for module in install_mods:
for exclusion in module.exclusion_ids:
if exclusion.name in install_names:
msg = _('Modules "%s" and "%s" are incompatible.')
raise UserError(msg % (module.shortdesc, exclusion.exclusion_id.shortdesc))
# check category exclusions
def closure(module):
todo = result = module
while todo:
result |= todo
todo = todo.mapped('dependencies_id.depend_id')
return result
exclusives = self.env['ir.module.category'].search([('exclusive', '=', True)])
for category in exclusives:
# retrieve installed modules in category and sub-categories
categories = category.search([('id', 'child_of', category.ids)])
modules = install_mods.filtered(lambda mod: mod.category_id in categories)
# the installation is valid if all installed modules in categories
# belong to the transitive dependencies of one of them
if modules and not any(modules <= closure(module) for module in modules):
msg = _('You are trying to install incompatible modules in category "%s":')
labels = dict(self.fields_get(['state'])['state']['selection'])
raise UserError("\n".join([msg % category.name] + [
"- %s (%s)" % (module.shortdesc, labels[module.state])
for module in modules
]))
return dict(ACTION_DICT, name=_('Install'))
@assert_log_admin_access
@api.multi
def button_immediate_install(self):
""" Installs the selected module(s) immediately and fully,
returns the next res.config action to execute
:returns: next res.config item to execute
:rtype: dict[str, object]
"""
_logger.info('User #%d triggered module installation', self.env.uid)
return self._button_immediate_function(type(self).button_install)
@assert_log_admin_access
@api.multi
def button_install_cancel(self):
self.write({'state': 'uninstalled', 'demo': False})
return True
@assert_log_admin_access
@api.multi
def module_uninstall(self):
""" Perform the various steps required to uninstall a module completely
including the deletion of all database structures created by the module:
tables, columns, constraints, etc.
"""
modules_to_remove = self.mapped('name')
self.env['ir.model.data']._module_data_uninstall(modules_to_remove)
# we deactivate prefetching to not try to read a column that has been deleted
self.with_context(prefetch_fields=False).write({'state': 'uninstalled', 'latest_version': False})
return True
@api.multi
def _remove_copied_views(self):
""" Remove the copies of the views installed by the modules in `self`.
Those copies do not have an external id so they will not be cleaned by
`_module_data_uninstall`. This is why we rely on `key` instead.
It is important to remove these copies because using them will crash if
they rely on data that don't exist anymore if the module is removed.
"""
domain = expression.OR([[('key', '=like', m.name + '.%')] for m in self])
orphans = self.env['ir.ui.view'].with_context(**{'active_test': False, MODULE_UNINSTALL_FLAG: True}).search(domain)
orphans.unlink()
@api.multi
@api.returns('self')
def downstream_dependencies(self, known_deps=None,
exclude_states=('uninstalled', 'uninstallable', 'to remove')):
""" Return the modules that directly or indirectly depend on the modules
in `self`, and that satisfy the `exclude_states` filter.
"""
if not self:
return self
known_deps = known_deps or self.browse()
query = """ SELECT DISTINCT m.id
FROM ir_module_module_dependency d
JOIN ir_module_module m ON (d.module_id=m.id)
WHERE
d.name IN (SELECT name from ir_module_module where id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s """
self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids)))
new_deps = self.browse([row[0] for row in self._cr.fetchall()])
missing_mods = new_deps - known_deps
known_deps |= new_deps
if missing_mods:
known_deps |= missing_mods.downstream_dependencies(known_deps, exclude_states)
return known_deps
@api.multi
@api.returns('self')
def upstream_dependencies(self, known_deps=None,
exclude_states=('installed', 'uninstallable', 'to remove')):
""" Return the dependency tree of modules of the modules in `self`, and
that satisfy the `exclude_states` filter.
"""
if not self:
return self
known_deps = known_deps or self.browse()
query = """ SELECT DISTINCT m.id
FROM ir_module_module_dependency d
JOIN ir_module_module m ON (d.module_id=m.id)
WHERE
m.name IN (SELECT name from ir_module_module_dependency where module_id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s """
self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids)))
new_deps = self.browse([row[0] for row in self._cr.fetchall()])
missing_mods = new_deps - known_deps
known_deps |= new_deps
if missing_mods:
known_deps |= missing_mods.upstream_dependencies(known_deps, exclude_states)
return known_deps
def next(self):
"""
Return the action linked to an ir.actions.todo is there exists one that
should be executed. Otherwise, redirect to /web
"""
Todos = self.env['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todo = Todos.search([('state', '=', 'open')], limit=1)
if active_todo:
_logger.info('next action is "%s"', active_todo.name)
return active_todo.action_launch()
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': '/web',
}
@api.multi
def _button_immediate_function(self, function):
try:
# This is done because the installation/uninstallation/upgrade can modify a currently
# running cron job and prevent it from finishing, and since the ir_cron table is locked
# during execution, the lock won't be released until timeout.
self._cr.execute("SELECT * FROM ir_cron FOR UPDATE NOWAIT")
except psycopg2.OperationalError:
raise UserError(_("The server is busy right now, module operations are not possible at"
" this time, please try again later."))
function(self)
self._cr.commit()
api.Environment.reset()
modules.registry.Registry.new(self._cr.dbname, update_module=True)
self._cr.commit()
env = api.Environment(self._cr, self._uid, self._context)
# pylint: disable=next-method-called
config = env['ir.module.module'].next() or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# reload the client; open the first available root menu
menu = env['ir.ui.menu'].search([('parent_id', '=', False)])[:1]
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu.id},
}
@assert_log_admin_access
@api.multi
def button_immediate_uninstall(self):
"""
Uninstall the selected module(s) immediately and fully,
returns the next res.config action to execute
"""
_logger.info('User #%d triggered module uninstallation', self.env.uid)
return self._button_immediate_function(type(self).button_uninstall)
@assert_log_admin_access
@api.multi
def button_uninstall(self):
if 'base' in self.mapped('name'):
raise UserError(_("The `base` module cannot be uninstalled"))
if not all(state in ('installed', 'to upgrade') for state in self.mapped('state')):
raise UserError(_(
"One or more of the selected modules have already been uninstalled, if you "
"believe this to be an error, you may try again later or contact support."
))
deps = self.downstream_dependencies()
(self + deps).write({'state': 'to remove'})
return dict(ACTION_DICT, name=_('Uninstall'))
@assert_log_admin_access
@api.multi
def button_uninstall_wizard(self):
""" Launch the wizard to uninstall the given module. """
return {
'type': 'ir.actions.act_window',
'target': 'new',
'name': _('Uninstall module'),
'view_mode': 'form',
'res_model': 'base.module.uninstall',
'context': {'default_module_id': self.id},
}
@api.multi
def button_uninstall_cancel(self):
self.write({'state': 'installed'})
return True
@assert_log_admin_access
@api.multi
def button_immediate_upgrade(self):
"""
Upgrade the selected module(s) immediately and fully,
return the next res.config action to execute
"""
return self._button_immediate_function(type(self).button_upgrade)
@assert_log_admin_access
@api.multi
def button_upgrade(self):
Dependency = self.env['ir.module.module.dependency']
self.update_list()
todo = list(self)
i = 0
while i < len(todo):
module = todo[i]
i += 1
if module.state not in ('installed', 'to upgrade'):
raise UserError(_("Can not upgrade module '%s'. It is not installed.") % (module.name,))
self.check_external_dependencies(module.name, 'to upgrade')
for dep in Dependency.search([('name', '=', module.name)]):
if dep.module_id.state == 'installed' and dep.module_id not in todo:
todo.append(dep.module_id)
self.browse(module.id for module in todo).write({'state': 'to upgrade'})
to_install = []
for module in todo:
for dep in module.dependencies_id:
if dep.state == 'unknown':
raise UserError(_('You try to upgrade the module %s that depends on the module: %s.\nBut this module is not available in your system.') % (module.name, dep.name,))
if dep.state == 'uninstalled':
to_install += self.search([('name', '=', dep.name)]).ids
self.browse(to_install).button_install()
return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))
@assert_log_admin_access
@api.multi
def button_upgrade_cancel(self):
self.write({'state': 'installed'})
return True
@staticmethod
def get_values_from_terp(terp):
return {
'description': terp.get('description', ''),
'shortdesc': terp.get('name', ''),
'author': terp.get('author', 'Unknown'),
'maintainer': terp.get('maintainer', False),
'contributors': ', '.join(terp.get('contributors', [])) or False,
'website': terp.get('website', ''),
'license': terp.get('license', 'LGPL-3'),
'sequence': terp.get('sequence', 100),
'application': terp.get('application', False),
'auto_install': terp.get('auto_install', False),
'icon': terp.get('icon', False),
'summary': terp.get('summary', ''),
'url': terp.get('url') or terp.get('live_test_url', ''),
'to_buy': False
}
@api.model
def create(self, vals):
new = super(Module, self).create(vals)
module_metadata = {
'name': 'module_%s' % vals['name'],
'model': 'ir.module.module',
'module': 'base',
'res_id': new.id,
'noupdate': True,
}
self.env['ir.model.data'].create(module_metadata)
return new
# update the list of available packages
@assert_log_admin_access
@api.model
def update_list(self):
res = [0, 0] # [update, add]
default_version = modules.adapt_version('1.0')
known_mods = self.with_context(lang=None).search([])
known_mods_names = {mod.name: mod for mod in known_mods}
# iterate through detected modules and update/create them in db
for mod_name in modules.get_modules():
mod = known_mods_names.get(mod_name)
terp = self.get_module_info(mod_name)
values = self.get_values_from_terp(terp)
if mod:
updated_values = {}
for key in values:
old = getattr(mod, key)
updated = tools.ustr(values[key]) if isinstance(values[key], pycompat.string_types) else values[key]
if (old or updated) and updated != old:
updated_values[key] = values[key]
if terp.get('installable', True) and mod.state == 'uninstallable':
updated_values['state'] = 'uninstalled'
if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version):
res[0] += 1
if updated_values:
mod.write(updated_values)
else:
mod_path = modules.get_module_path(mod_name)
if not mod_path or not terp:
continue
state = "uninstalled" if terp.get('installable', True) else "uninstallable"
mod = self.create(dict(name=mod_name, state=state, **values))
res[1] += 1
mod._update_dependencies(terp.get('depends', []))
mod._update_exclusions(terp.get('excludes', []))
mod._update_category(terp.get('category', 'Uncategorized'))
return res
@assert_log_admin_access
@api.multi
def download(self, download=True):
return []
@assert_log_admin_access
@api.model
def install_from_urls(self, urls):
if not self.env.user.has_group('base.group_system'):
raise AccessDenied()
# One-click install is opt-in - cfr Issue #15225
ad_dir = tools.config.addons_data_dir
if not os.access(ad_dir, os.W_OK):
msg = (_("Automatic install of downloaded Apps is currently disabled.") + "\n\n" +
_("To enable it, make sure this directory exists and is writable on the server:") +
"\n%s" % ad_dir)
_logger.warning(msg)
raise UserError(msg)
apps_server = werkzeug.urls.url_parse(self.get_apps_server())
OPENERP = odoo.release.product_name.lower()
tmp = tempfile.mkdtemp()
_logger.debug('Install from url: %r', urls)
try:
# 1. Download & unzip missing modules
for module_name, url in urls.items():
if not url:
continue # nothing to download, local version is already the last one
up = werkzeug.urls.url_parse(url)
if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
raise AccessDenied()
try:
_logger.info('Downloading module `%s` from OpenERP Apps', module_name)
response = requests.get(url)
response.raise_for_status()
content = response.content
except Exception:
_logger.exception('Failed to fetch module %s', module_name)
raise UserError(_('The `%s` module appears to be unavailable at the moment, please try again later.') % module_name)
else:
zipfile.ZipFile(io.BytesIO(content)).extractall(tmp)
assert os.path.isdir(os.path.join(tmp, module_name))
# 2a. Copy/Replace module source in addons path
for module_name, url in urls.items():
if module_name == OPENERP or not url:
continue # OPENERP is special case, handled below, and no URL means local module
module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False)
bck = backup(module_path, False)
_logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path)
shutil.move(os.path.join(tmp, module_name), module_path)
if bck:
shutil.rmtree(bck)
# 2b. Copy/Replace server+base module source if downloaded
if urls.get(OPENERP):
# special case. it contains the server and the base module.
# extract path is not the same
base_path = os.path.dirname(modules.get_module_path('base'))
# copy all modules in the SERVER/odoo/addons directory to the new "odoo" module (except base itself)
for d in os.listdir(base_path):
if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
destdir = os.path.join(tmp, OPENERP, 'addons', d) # XXX 'odoo' subdirectory ?
shutil.copytree(os.path.join(base_path, d), destdir)
# then replace the server by the new "base" module
server_dir = tools.config['root_path'] # XXX or dirname()
bck = backup(server_dir)
_logger.info('Copy downloaded module `odoo` to `%s`', server_dir)
shutil.move(os.path.join(tmp, OPENERP), server_dir)
#if bck:
# shutil.rmtree(bck)
self.update_list()
with_urls = [module_name for module_name, url in urls.items() if url]
downloaded = self.search([('name', 'in', with_urls)])
installed = self.search([('id', 'in', downloaded.ids), ('state', '=', 'installed')])
to_install = self.search([('name', 'in', list(urls)), ('state', '=', 'uninstalled')])
post_install_action = to_install.button_immediate_install()
if installed or to_install:
# in this case, force server restart to reload python code...
self._cr.commit()
odoo.service.server.restart()
return {
'type': 'ir.actions.client',
'tag': 'home',
'params': {'wait': True},
}
return post_install_action
finally:
shutil.rmtree(tmp)
@api.model
def get_apps_server(self):
return tools.config.get('apps_server', 'https://apps.odoo.com/apps')
def _update_dependencies(self, depends=None):
existing = set(dep.name for dep in self.dependencies_id)
needed = set(depends or [])
for dep in (needed - existing):
self._cr.execute('INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)', (self.id, dep))
for dep in (existing - needed):
self._cr.execute('DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s', (self.id, dep))
self.invalidate_cache(['dependencies_id'], self.ids)
def _update_exclusions(self, excludes=None):
existing = set(excl.name for excl in self.exclusion_ids)
needed = set(excludes or [])
for name in (needed - existing):
self._cr.execute('INSERT INTO ir_module_module_exclusion (module_id, name) VALUES (%s, %s)', (self.id, name))
for name in (existing - needed):
self._cr.execute('DELETE FROM ir_module_module_exclusion WHERE module_id=%s AND name=%s', (self.id, name))
self.invalidate_cache(['exclusion_ids'], self.ids)
def _update_category(self, category='Uncategorized'):
current_category = self.category_id
current_category_path = []
while current_category:
current_category_path.insert(0, current_category.name)
current_category = current_category.parent_id
categs = category.split('/')
if categs != current_category_path:
cat_id = modules.db.create_categories(self._cr, categs)
self.write({'category_id': cat_id})
@api.multi
def _update_translations(self, filter_lang=None):
if not filter_lang:
langs = self.env['res.lang'].search([('translatable', '=', True)])
filter_lang = [lang.code for lang in langs]
elif not isinstance(filter_lang, (list, tuple)):
filter_lang = [filter_lang]
update_mods = self.filtered(lambda r: r.state in ('installed', 'to install', 'to upgrade'))
mod_dict = {
mod.name: mod.dependencies_id.mapped('name')
for mod in update_mods
}
mod_names = topological_sort(mod_dict)
self.env['ir.translation'].load_module_terms(mod_names, filter_lang)
@api.multi
def _check(self):
for module in self:
if not module.description_html:
_logger.warning('module %s: description is empty !', module.name)
@api.model
@tools.ormcache()
def _installed(self):
""" Return the set of installed modules as a dictionary {name: id} """
return {
module.name: module.id
for module in self.sudo().search([('state', '=', 'installed')])
}
DEP_STATES = STATES + [('unknown', 'Unknown')]
class ModuleDependency(models.Model):
_name = "ir.module.module.dependency"
_description = "Module dependency"
# the dependency name
name = fields.Char(index=True)
# the module that depends on it
module_id = fields.Many2one('ir.module.module', 'Module', ondelete='cascade')
# the module corresponding to the dependency, and its status
depend_id = fields.Many2one('ir.module.module', 'Dependency', compute='_compute_depend')
state = fields.Selection(DEP_STATES, string='Status', compute='_compute_state')
@api.multi
@api.depends('name')
def _compute_depend(self):
# retrieve all modules corresponding to the dependency names
names = list(set(dep.name for dep in self))
mods = self.env['ir.module.module'].search([('name', 'in', names)])
# index modules by name, and assign dependencies
name_mod = dict((mod.name, mod) for mod in mods)
for dep in self:
dep.depend_id = name_mod.get(dep.name)
@api.one
@api.depends('depend_id.state')
def _compute_state(self):
self.state = self.depend_id.state or 'unknown'
class ModuleExclusion(models.Model):
_name = "ir.module.module.exclusion"
_description = "Module exclusion"
# the exclusion name
name = fields.Char(index=True)
# the module that excludes it
module_id = fields.Many2one('ir.module.module', 'Module', ondelete='cascade')
# the module corresponding to the exclusion, and its status
exclusion_id = fields.Many2one('ir.module.module', 'Exclusion Module', compute='_compute_exclusion')
state = fields.Selection(DEP_STATES, string='Status', compute='_compute_state')
@api.multi
@api.depends('name')
def _compute_exclusion(self):
# retrieve all modules corresponding to the exclusion names
names = list(set(excl.name for excl in self))
mods = self.env['ir.module.module'].search([('name', 'in', names)])
# index modules by name, and assign dependencies
name_mod = {mod.name: mod for mod in mods}
for excl in self:
excl.exclusion_id = name_mod.get(excl.name)
@api.one
@api.depends('exclusion_id.state')
def _compute_state(self):
self.state = self.exclusion_id.state or 'unknown'
| agpl-3.0 | 3,125,939,905,392,733,700 | 399,035,112,214,931,800 | 42.441358 | 184 | 0.58958 | false |
sergg75/dataModels | Environment/AirQualityObserved/harvest/madrid_air_quality_harvest.py | 1 | 10084 | #!../bin/python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import csv
import datetime
import json
import urllib2
import StringIO
import logging
import logging.handlers
import re
from pytz import timezone
import contextlib
import copy
# Entity type
AMBIENT_TYPE_NAME = 'AirQualityObserved'
# List of known air quality stations
station_dict = { }
# Orion service that will store the data
orion_service = 'http://localhost:1030'
logger = None
madrid_tz = timezone('CET')
pollutant_dict = {
'01': 'SO2',
'06': 'CO',
'07': 'NO',
'08': 'NO2',
'09': 'PM2.5',
'10': 'PM10',
'12': 'NOx',
'14': 'O3',
'20': 'TOL',
'30': 'BEN',
'35': 'EBE',
'37': 'MXY',
'38': 'PXY',
'39': 'OXY',
'42': 'TCH',
'43': 'CH4',
'44': 'NHMC'
}
pollutant_descriptions = {
'01': 'Sulfur Dioxide',
'06': 'Carbon Monoxide',
'07': 'Nitrogen Monoxide',
'08': 'Nitrogen Dioxide',
'09': 'Particles lower than 2.5',
'10': 'Particles lower than 10',
'12': 'Nitrogen oxides',
'14': 'Ozone',
'20': 'Toluene',
'30': 'Benzene',
'35': 'Etilbenzene',
'37': 'Metaxylene',
'38': 'Paraxylene',
'39': 'Orthoxylene',
'42': 'Total Hydrocarbons',
'43': 'Hydrocarbons - Methane',
'44': 'Non-methane hydrocarbons - Hexane'
}
other_dict = {
'80': 'ultravioletRadiation',
'81': 'windSpeed',
'82': 'windDirection',
'83': 'temperature',
'86': 'relativeHumidity',
'87': 'barometricPressure',
'88': 'solarRadiation',
'89': 'precipitation',
'92': 'acidRainLevel'
}
other_descriptions = {
'80': 'Ultraviolet Radiation',
'81': 'Wind Speed',
'82': 'Wind Direction',
'83': 'temperature',
'86': 'Relative Humidity',
'87': 'Barometric Pressure',
'88': 'Solar Radiation',
'89': 'Precipitation',
'92': 'Acid Rain Level'
}
dataset_url = 'http://datos.madrid.es/egob/catalogo/212531-7916318-calidad-aire-tiempo-real.txt'
# Statistics for tracking purposes
persisted_entities = 0
in_error_entities = 0
MIME_JSON = 'application/json'
FIWARE_SERVICE = 'AirQuality'
FIWARE_SPATH = '/Spain_Madrid'
# Sanitize string to avoid forbidden characters by Orion
def sanitize(str_in):
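    # e.g. (illustrative input) sanitize('Pza. del "Carmen" <centro>;')
    # drops the quotes, angle brackets and semicolon, leaving
    # 'Pza. del Carmen centro'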
return re.sub(r"[<(>)\"\'=;]", "", str_in)
# Obtains air quality data and harmonizes it, persisting to Orion
def get_air_quality_madrid():
req = urllib2.Request(url=dataset_url)
with contextlib.closing(urllib2.urlopen(req)) as f:
csv_data = f.read()
csv_file = StringIO.StringIO(csv_data)
reader = csv.reader(csv_file, delimiter=',')
# Dictionary with station data indexed by station code
# An array per station code containing one element per hour
stations = { }
for row in reader:
station_code = str(row[0]) + str(row[1]) + str(row[2])
station_num = row[2]
            if not station_dict.get(station_num):
continue
if not station_code in stations:
stations[station_code] = []
magnitude = row[3]
if (not magnitude in pollutant_dict) and (not magnitude in other_dict):
continue
is_other = None
if magnitude in pollutant_dict:
property_name = pollutant_dict[magnitude]
property_desc = pollutant_descriptions[magnitude]
is_other = False
if magnitude in other_dict:
property_name = other_dict[magnitude]
property_desc = other_descriptions[magnitude]
is_other = True
hour = 0
for x in xrange(9, 57, 2):
value = row[x]
value_control = row[x + 1]
if value_control == 'V':
# A new entity object is created if it does not exist yet
if (len(stations[station_code]) < hour + 1):
stations[station_code].append(build_station(station_num, station_code, hour, row))
elif (not 'id' in stations[station_code][hour]):
stations[station_code][hour] = build_station(station_num, station_code, hour, row)
param_value = float(value)
if not is_other:
unit_code = 'GQ'
if property_name == 'CO':
unit_code = 'GP'
measurand_data = [property_name, str(param_value), unit_code, property_desc]
stations[station_code][hour]['measurand']['value'].append(','.join(measurand_data))
else:
if property_name == 'relativeHumidity':
param_value = param_value / 100
stations[station_code][hour][property_name] = {
'value': param_value
}
else:
# ensure there are no holes in the data
if (len(stations[station_code]) < hour + 1):
stations[station_code].append({})
hour += 1
print len(stations[station_code])
# Now persisting data to Orion Context Broker
for station in stations:
station_data = stations[station]
data_array = []
for data in station_data:
if 'id' in data:
data_array.append(data)
if len(data_array) > 0:
logger.debug("Retrieved data for %s at %s (last hour)", station, data_array[-1]['dateObserved']['value'])
# Last measurement is duplicated to have an entity with the latest measurement obtained
last_measurement = data_array[-1]
last_measurement['id'] = 'Madrid-AirQualityObserved-' + last_measurement['stationCode']['value'] + '-' + 'latest'
        else:
            logger.warn('No data retrieved for: %s', station)
post_station_data(station, data_array)
#############
# Builds a new entity of type AirQualityObserved
def build_station(station_num, station_code, hour, row):
station_data = {
'type': AMBIENT_TYPE_NAME,
'measurand': {
'type': 'List',
'value': []
},
'stationCode': {
'value': station_code
},
'stationName': {
'value': sanitize(station_dict[station_num]['name'])
},
'address': {
'type': 'PostalAddress',
'value': {
'addressCountry': 'ES',
'addressLocality': 'Madrid',
'streetAddress': sanitize(station_dict[station_num]['address'])
}
},
'location': {
'type': 'geo:json',
'value': station_dict[station_num]['location']['value'] or None
},
'source': {
'type': 'URL',
'value': 'http://datos.madrid.es'
},
'dataProvider': {
'value': 'TEF'
}
}
valid_from = datetime.datetime(int(row[6]), int(row[7]), int(row[8]), hour)
station_data['id'] = 'Madrid-AirQualityObserved-' + station_code + '-' + valid_from.isoformat()
valid_to = (valid_from + datetime.timedelta(hours=1))
# Adjust timezones
valid_from = valid_from.replace(tzinfo=madrid_tz)
valid_to = valid_to.replace(tzinfo=madrid_tz)
station_data['validity'] = {
'value': {
'from': valid_from.isoformat(),
'to': valid_to.isoformat()
},
'type': 'StructuredValue'
}
station_data['hour'] = {
'value': str(hour) + ':' + '00'
}
observ_corrected_date = valid_from
station_data['dateObserved'] = {
'type': 'DateTime',
'value': observ_corrected_date.isoformat()
}
return station_data
# POST data to an Orion Context Broker instance using NGSIv2 API
def post_station_data(station_code, data):
if len(data) == 0:
return
payload = {
'actionType': 'APPEND',
'entities': data
}
data_as_str = json.dumps(payload)
headers = {
'Content-Type': MIME_JSON,
'Content-Length': len(data_as_str),
'Fiware-Service': FIWARE_SERVICE,
'Fiware-Servicepath': FIWARE_SPATH
}
req = urllib2.Request(url=(orion_service + '/v2/op/update'), data=data_as_str, headers=headers)
logger.debug('Going to persist %s to %s - %d', station_code, orion_service, len(data))
try:
with contextlib.closing(urllib2.urlopen(req)) as f:
global persisted_entities
logger.debug("Entity successfully created: %s", station_code)
persisted_entities = persisted_entities + 1
except urllib2.URLError as e:
global in_error_entities
logger.error('Error while POSTing data to Orion: %d %s', e.code, e.read())
logger.debug('Data which failed: %s', data_as_str)
in_error_entities = in_error_entities + 1
# Reads station data from CSV file
def read_station_csv():
with contextlib.closing(open('madrid_airquality_stations.csv', 'rU')) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
index = 0
for row in reader:
            if index != 0:
station_code = row[2]
station_name = row[3]
station_address = row[4]
station_coords = {
'type': 'geo:json',
'value': {
'type': 'Point',
'coordinates': [float(row[0]), float(row[1])]
}
}
station_dict[station_code.zfill(3)] = {
'name': station_name,
'address': station_address,
'location': station_coords
}
index += 1
station_dict['099'] = {
'name': 'average',
'address': None,
'location': None
}
def setup_logger():
global logger
LOG_FILENAME = 'harvest_madrid.log'
# Set up a specific logger with our desired output level
logger = logging.getLogger('Madrid')
logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=2000000, backupCount=3)
formatter = logging.Formatter('%(levelname)s %(asctime)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == '__main__':
setup_logger()
read_station_csv()
logger.debug('#### Starting a new harvesting and harmonization cycle ... ####')
logger.debug('Number of air quality stations known: %d', len(station_dict.keys()))
get_air_quality_madrid()
logger.debug('Number of entities persisted: %d', persisted_entities)
logger.debug('Number of entities in error: %d', in_error_entities)
logger.debug('#### Harvesting cycle finished ... ####')
| mit | 2,488,354,978,633,439,000 | -5,186,001,509,777,392,000 | 26.327913 | 121 | 0.597679 | false |
tudorvio/nova | nova/virt/disk/mount/loop.py | 64 | 2270 | # Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting images with the loop device."""
from oslo_log import log as logging
from nova.i18n import _, _LI
from nova import utils
from nova.virt.disk.mount import api
LOG = logging.getLogger(__name__)
class LoopMount(api.Mount):
"""loop back support for raw images."""
mode = 'loop'
def _inner_get_dev(self):
out, err = utils.trycmd('losetup', '--find', '--show',
self.image.path,
run_as_root=True)
if err:
self.error = _('Could not attach image to loopback: %s') % err
LOG.info(_LI('Loop mount error: %s'), self.error)
self.linked = False
self.device = None
return False
self.device = out.strip()
LOG.debug("Got loop device %s", self.device)
self.linked = True
return True
def get_dev(self):
# NOTE(mikal): the retry is required here in case we are low on loop
# devices. Note however that modern kernels will use more loop devices
# if they exist. If you're seeing lots of retries, consider adding
# more devices.
return self._get_dev_retry_helper()
def unget_dev(self):
if not self.linked:
return
# NOTE(mikal): On some kernels, losetup -d will intermittently fail,
# thus leaking a loop device unless the losetup --detach is retried:
# https://lkml.org/lkml/2012/9/28/62
LOG.debug("Release loop device %s", self.device)
utils.execute('losetup', '--detach', self.device, run_as_root=True,
attempts=3)
self.linked = False
self.device = None
| apache-2.0 | -5,159,345,178,170,271,000 | -4,574,251,510,896,136,700 | 35.031746 | 78 | 0.627753 | false |
kushG/osf.io | tests/test_rubeus.py | 2 | 19647 | #!/usr/bin/env python
# encoding: utf-8
import os
from types import NoneType
from xmlrpclib import DateTime
import mock
from nose.tools import *
from webtest_plus import TestApp
from tests.base import OsfTestCase
from tests.factories import (UserFactory, ProjectFactory, NodeFactory,
AuthFactory, PointerFactory, DashboardFactory, FolderFactory, RegistrationFactory)
from framework.auth import Auth
from website.util import rubeus, api_url_for
import website.app
from website.util.rubeus import sort_by_name
from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID, \
ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME
app = website.app.init_app(
routes=True, set_backends=False, settings_module='website.settings'
)
class TestRubeus(OsfTestCase):
def setUp(self):
super(TestRubeus, self).setUp()
self.project = ProjectFactory.build()
self.consolidated_auth = Auth(user=self.project.creator)
self.non_authenticator = UserFactory()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('s3', self.consolidated_auth)
self.project.creator.add_addon('s3', self.consolidated_auth)
self.node_settings = self.project.get_addon('s3')
self.user_settings = self.project.creator.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.user_settings
self.node_settings.save()
def test_hgrid_dummy(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
# FIXME: These tests are very brittle.
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon Simple Storage Service: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
expected['permissions'] = permissions
actual = rubeus.build_addon_root(node_settings, node_settings.bucket, permissions=permissions)
assert actual['urls']['fetch']
assert actual['urls']['upload']
del actual['urls']
assert_equals(actual, expected)
def test_build_addon_root_has_correct_upload_limits(self):
self.node_settings.config.max_file_size = 10
self.node_settings.config.high_max_file_size = 20
node = self.project
user = self.project.creator
auth = Auth(user)
permissions = {
'view': node.can_view(auth),
'edit': node.can_edit(auth) and not node.is_registration,
}
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(result['accept']['maxSize'], self.node_settings.config.max_file_size)
# user now has elevated upload limit
user.system_tags.append('high_upload_limit')
user.save()
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(
result['accept']['maxSize'],
self.node_settings.config.high_max_file_size
)
def test_hgrid_dummy_fail(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
rv = {
'isPointer': False,
'addon': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon Simple Storage Service: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {
'fetch': node.api_url + 's3/hgrid/',
'upload': node.api_url + 's3/upload/'
},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_not_equals(rubeus.build_addon_root(
node_settings, node_settings.bucket, permissions=permissions), rv)
def test_hgrid_dummy_overrides(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon Simple Storage Service: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_equal(
rubeus.build_addon_root(
node_settings, node_settings.bucket,
permissions=permissions, urls={}
),
expected
)
def test_serialize_private_node(self):
user = UserFactory()
auth = Auth(user=user)
public = ProjectFactory.build(is_public=True)
# Add contributor with write permissions to avoid admin permission cascade
public.add_contributor(user, permissions=['read', 'write'])
public.save()
private = ProjectFactory(project=public, is_public=False)
NodeFactory(project=private)
collector = rubeus.NodeFileCollector(node=public, auth=auth)
private_dummy = collector._serialize_node(private)
assert_false(private_dummy['permissions']['edit'])
assert_false(private_dummy['permissions']['view'])
assert_equal(private_dummy['name'], 'Private Component')
assert_equal(len(private_dummy['children']), 0)
def test_collect_components_deleted(self):
node = NodeFactory(creator=self.project.creator, project=self.project)
node.is_deleted = True
collector = rubeus.NodeFileCollector(
self.project, Auth(user=UserFactory())
)
nodes = collector._collect_components(self.project, visited=[])
assert_equal(len(nodes), 0)
def test_serialized_pointer_has_flag_indicating_its_a_pointer(self):
pointer = PointerFactory()
serializer = rubeus.NodeFileCollector(node=pointer, auth=self.consolidated_auth)
ret = serializer._serialize_node(pointer)
assert_true(ret['isPointer'])
# TODO: Make this more reusable across test modules
mock_addon = mock.Mock()
serialized = {
'addon': 'mockaddon',
'name': 'Mock Addon',
'isAddonRoot': True,
'extra': '',
'permissions': {'view': True, 'edit': True},
'urls': {
'fetch': '/fetch',
'delete': '/delete'
}
}
mock_addon.config.get_hgrid_data.return_value = [serialized]
class TestSerializingNodeWithAddon(OsfTestCase):
def setUp(self):
super(TestSerializingNodeWithAddon, self).setUp()
self.auth = AuthFactory()
self.project = ProjectFactory(creator=self.auth.user)
self.project.get_addons = mock.Mock()
self.project.get_addons.return_value = [mock_addon]
self.serializer = rubeus.NodeFileCollector(node=self.project, auth=self.auth)
def test_collect_addons(self):
ret = self.serializer._collect_addons(self.project)
assert_equal(ret, [serialized])
def test_sort_by_name(self):
files = [
{'name': 'F.png'},
{'name': 'd.png'},
{'name': 'B.png'},
{'name': 'a.png'},
{'name': 'c.png'},
{'name': 'e.png'},
{'name': 'g.png'},
]
sorted_files = [
{'name': 'a.png'},
{'name': 'B.png'},
{'name': 'c.png'},
{'name': 'd.png'},
{'name': 'e.png'},
{'name': 'F.png'},
{'name': 'g.png'},
]
ret = sort_by_name(files)
for index, value in enumerate(ret):
assert_equal(value['name'], sorted_files[index]['name'])
def test_sort_by_name_none(self):
files = None
sorted_files = None
ret = sort_by_name(files)
assert_equal(ret, sorted_files)
def test_serialize_node(self):
ret = self.serializer._serialize_node(self.project)
assert_equal(
len(ret['children']),
len(self.project.get_addons.return_value) + len(self.project.nodes)
)
assert_equal(ret['kind'], rubeus.FOLDER)
assert_equal(ret['name'], 'Project: {0}'.format(self.project.title))
assert_equal(
ret['permissions'],
{
'view': True,
'edit': True,
}
)
assert_equal(
ret['urls'],
{
'upload': None,
'fetch': None,
},
)
def test_collect_js_recursive(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(project=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['bar.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('bar.js', result)
assert_in('baz.js', result)
def test_collect_js_unique(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(project=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['foo.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('baz.js', result)
class TestSerializingEmptyDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingEmptyDashboard, self).setUp()
self.dash = DashboardFactory()
self.auth = AuthFactory(user=self.dash.creator)
self.dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_empty_dashboard_hgrid_representation_is_list(self):
assert_is_instance(self.dash_hgrid, list)
def test_empty_dashboard_has_proper_number_of_smart_folders(self):
assert_equal(len(self.dash_hgrid), 2)
def test_empty_dashboard_smart_folders_have_correct_names_and_ids(self):
for node_hgrid in self.dash_hgrid:
assert_in(node_hgrid['name'], (ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME))
for node_hgrid in self.dash_hgrid:
if node_hgrid['name'] == ALL_MY_PROJECTS_ID:
assert_equal(node_hgrid['node_id'], ALL_MY_PROJECTS_ID)
elif node_hgrid['name'] == ALL_MY_REGISTRATIONS_ID:
assert_equal(node_hgrid['node_id'], ALL_MY_REGISTRATIONS_ID)
def test_empty_dashboard_smart_folders_are_empty(self):
for node_hgrid in self.dash_hgrid:
assert_equal(node_hgrid['children'], [])
def test_empty_dashboard_are_valid_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_folder(node)
def test_empty_dashboard_smart_folders_are_valid_smart_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_smart_folder(node)
class TestSerializingPopulatedDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingPopulatedDashboard, self).setUp()
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
self.init_dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_dashboard_adding_one_folder_increases_size_by_one(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
def test_dashboard_adding_one_folder_does_not_remove_smart_folders(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_true(
{ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME, folder.title} <=
{node_hgrid['name'] for node_hgrid in dash_hgrid}
)
def test_dashboard_adding_one_folder_increases_size_by_one_in_hgrid_representation(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
project = ProjectFactory(creator=self.user)
folder.add_pointer(project,self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
class TestSerializingFolders(OsfTestCase):
def setUp(self):
super(TestSerializingFolders, self).setUp()
self.user = UserFactory()
self.auth = AuthFactory(user=self.user)
def test_serialized_folder_is_valid_folder(self):
folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(folder, self.auth)
assert_equal(folder_hgrid, [])
def test_serialize_folder_containing_folder_increases_size_by_one(self):
outer_folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
inner_folder = FolderFactory(creator=self.user)
outer_folder.add_pointer(inner_folder, self.auth)
new_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
assert_equal(len(folder_hgrid) + 1, len(new_hgrid))
class TestSmartFolderViews(OsfTestCase):
def setUp(self):
super(TestSmartFolderViews, self).setUp()
self.app = TestApp(app)
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_project_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
with app.test_request_context():
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_PROJECTS_ID)
import pprint;pp = pprint.PrettyPrinter()
init_len = len(res.json[u'data'])
ProjectFactory(creator=self.user)
res = self.app.get(url + ALL_MY_PROJECTS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_registration_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
with app.test_request_context():
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
init_len = len(res.json[u'data'])
RegistrationFactory(creator=self.user)
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
def assert_valid_hgrid_folder(node_hgrid):
folder_types = {
'name': str,
'children': list,
'contributors': list,
'dateModified': (DateTime, NoneType),
'node_id': str,
'modifiedDelta': int,
'modifiedBy': (dict, NoneType),
'urls': dict,
'isDashboard': bool,
'expand': bool,
'permissions': dict,
'isSmartFolder': bool,
'childrenCount': int,
}
keys_types = {
'urls': (str, NoneType),
'permissions': bool,
}
folder_values = {
'parentIsFolder': True,
'isPointer': False,
'isFolder': True,
'kind': 'folder',
'type': 'smart-folder'
}
if isinstance(node_hgrid, list):
node_hgrid = node_hgrid[0]['data']
else:
assert_is_instance(node_hgrid, dict)
for key, correct_value in folder_values.items():
assert_equal(node_hgrid[key], correct_value)
for key, correct_type in folder_types.items():
assert_is_instance(node_hgrid[key], correct_type)
for key, correct_type in keys_types.items():
for inner_key, inner_value in node_hgrid[key].items():
assert_is_instance(inner_value, correct_type)
valid_keys = set(folder_types.keys()).union(folder_values.keys())
for key in node_hgrid.keys():
assert_in(key, valid_keys)
def assert_valid_hgrid_smart_folder(node_hgrid):
smart_folder_values = {
'contributors': [],
'isPointer': False,
'dateModified': None,
'modifiedDelta': 0,
'modifiedBy': None,
'isSmartFolder': True,
'urls': {
'upload': None,
'fetch': None
},
'isDashboard': False,
'permissions': {
'edit': False,
'acceptsDrops': False,
'copyable': False,
'movable': False,
'view': True
}
}
assert_valid_hgrid_folder(node_hgrid)
for attr, correct_value in smart_folder_values.items():
assert_equal(correct_value, node_hgrid[attr])
| apache-2.0 | 3,479,602,515,183,115,300 | 5,722,214,325,870,607,000 | 33.712014 | 102 | 0.592762 | false |
nhuntwalker/astroML | book_figures/chapter5/fig_likelihood_cauchy.py | 3 | 3219 | """
Log-likelihood for Cauchy Distribution
--------------------------------------
Figure 5.10
An illustration of the logarithm of posterior probability distribution for
:math:`\mu` and :math:`\gamma`, :math:`L(\mu,\gamma)` (see eq. 5.75) for
N = 10 (the sample is generated using the Cauchy distribution with
:math:`\mu = 0` and :math:`\gamma = 2`). The maximum of L is renormalized
to 0, and color coded as shown in the legend. The contours enclose the regions
that contain 0.683, 0.955 and 0.997 of the cumulative (integrated) posterior
probability.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import cauchy
from astroML.plotting.mcmc import convert_to_stdev
from astroML.stats import median_sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(xi, gamma, mu):
"""Equation 5.74: cauchy likelihood"""
xi = np.asarray(xi)
n = xi.size
shape = np.broadcast(gamma, mu).shape
xi = xi.reshape(xi.shape + tuple([1 for s in shape]))
return ((n - 1) * np.log(gamma)
- np.sum(np.log(gamma ** 2 + (xi - mu) ** 2), 0))
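# Written out, the quantity returned by cauchy_logL above is
#   (n - 1) * ln(gamma) - sum_i ln(gamma**2 + (x_i - mu)**2),
# i.e. the log-posterior L(mu, gamma) up to an additive constant; dropping the
# constant terms changes neither the location of the maximum nor the contours.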
#------------------------------------------------------------
# Define the grid and compute logL
gamma = np.linspace(0.1, 5, 70)
mu = np.linspace(-5, 5, 70)
np.random.seed(44)
mu0 = 0
gamma0 = 2
xi = cauchy(mu0, gamma0).rvs(10)
logL = cauchy_logL(xi, gamma[:, np.newaxis], mu)
logL -= logL.max()
#------------------------------------------------------------
# Find the max and print some information
i, j = np.where(logL >= np.max(logL))
print("mu from likelihood:", mu[j])
print("gamma from likelihood:", gamma[i])
print()
med, sigG = median_sigmaG(xi)
print("mu from median", med)
print("gamma from quartiles:", sigG / 1.483) # Equation 3.54
print()
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin='lower', cmap=plt.cm.binary,
extent=(mu[0], mu[-1], gamma[0], gamma[-1]),
aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.contour(mu, gamma, convert_to_stdev(logL),
levels=(0.683, 0.955, 0.997),
colors='k')
plt.text(0.5, 0.93,
r'$L(\mu,\gamma)\ \mathrm{for}\ \bar{x}=0,\ \gamma=2,\ n=10$',
bbox=dict(ec='k', fc='w', alpha=0.9),
ha='center', va='center', transform=plt.gca().transAxes)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\gamma$')
plt.show()
| bsd-2-clause | -1,009,195,184,392,201,300 | 791,259,665,300,806,900 | 32.185567 | 79 | 0.610749 | false |
nhomar/odoo-mirror | addons/mail/__openerp__.py | 41 | 3832 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Social Network',
'version': '1.0',
'category': 'Social Network',
'sequence': 2,
'summary': 'Discussions, Mailing Lists, News',
'description': """
Business oriented Social Networking
===================================
The Social Networking module provides a unified social network abstraction layer allowing applications to display a complete
communication history on documents with a fully-integrated email and message management system.
It enables users to read and send messages as well as emails. It also provides a feeds page combined with a subscription mechanism that allows users to follow documents and stay constantly updated about recent news.
Main Features
-------------
* Clean and renewed communication history for any OpenERP document that can act as a discussion topic
* Subscription mechanism to be updated about new messages on interesting documents
* Unified feeds page to see recent messages and activity on followed documents
* User communication through the feeds page
* Threaded discussion design on documents
* Relies on the global outgoing mail server - an integrated email management system - allowing emails to be sent with a configurable scheduler-based processing engine
* Includes an extensible, generic email composition assistant that can turn into a mass-mailing assistant and is capable of interpreting simple *placeholder expressions* that will be replaced with dynamic data when each email is actually sent.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/enterprise-social-network',
'depends': ['base', 'base_setup'],
'data': [
'wizard/invite_view.xml',
'wizard/mail_compose_message_view.xml',
'mail_message_subtype.xml',
'res_config_view.xml',
'mail_message_view.xml',
'mail_mail_view.xml',
'mail_followers_view.xml',
'mail_thread_view.xml',
'mail_group_view.xml',
'res_partner_view.xml',
'data/mail_data.xml',
'data/mail_group_data.xml',
'security/mail_security.xml',
'security/ir.model.access.csv',
'mail_alias_view.xml',
'res_users_view.xml',
'views/mail.xml',
],
'demo': [
'data/mail_demo.xml',
'data/mail_group_demo_data.xml',
],
'installable': True,
'application': True,
'images': [
'images/inbox.jpeg',
'images/messages_form.jpeg',
'images/messages_list.jpeg',
'images/email.jpeg',
'images/join_a_group.jpeg',
'images/share_a_message.jpeg',
],
'qweb': [
'static/src/xml/mail.xml',
'static/src/xml/mail_followers.xml',
'static/src/xml/announcement.xml',
'static/src/xml/suggestions.xml',
],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,751,290,253,442,937,900 | 1,073,551,879,559,181,000 | 42.05618 | 243 | 0.649008 | false |
pombredanne/bitmath | tests/test_properties.py | 2 | 2080 | # -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014 Tim Bielawa <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests to verify that type properties are accessible and immutable
"""
from . import TestCase
import bitmath
class TestAttributeProperties(TestCase):
def setUp(self):
self.kib = bitmath.KiB(1)
self.kib_bits = 8192
self.kib_bytes = 1024
self.kib_value = 1
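        # Sanity check on the expected values: 1 KiB = 2**10 bytes = 1024 bytes,
        # and 1024 bytes * 8 bits/byte = 8192 bits, matching kib_bytes and kib_bits.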
def test_read_bits(self):
"""Read the 'bits' property of a bitmath type"""
self.assertEqual(self.kib.bits, self.kib_bits)
def test_read_bytes(self):
"""Read the 'bytes' property of a bitmath type"""
self.assertEqual(self.kib.bytes, self.kib_bytes)
def test_read_value(self):
"""Read the 'value' property of a bitmath type"""
self.assertEqual(self.kib.value, self.kib_value)
def test_write_property_fails(self):
"""bitmath type's properties are read-only"""
with self.assertRaises(AttributeError):
self.kib.value += 42
| mit | -636,601,776,141,762,600 | 3,610,076,558,534,031,400 | 34.844828 | 70 | 0.709476 | false |
denovator/myfriki | lib/jinja2/jinja2/_compat.py | 638 | 4042 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
get_next = lambda x: x.__next__
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
get_next = lambda x: x.next
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
try:
next = next
except NameError:
def next(it):
return it.next()
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
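# Illustrative call pattern (the names are hypothetical): a class is declared as
#   class MyClass(with_metaclass(MyMeta, MyBase)): ...
# and the same declaration then works unchanged on both Python 2 and Python 3.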
try:
from collections import Mapping as mapping_types
except ImportError:
import UserDict
mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
# common types. These also exist in the special types module, which however
# is not available in IronPython out of the box. Deriving them here also means
# we don't have to deal with implementation-specific stuff.
class _C(object):
def method(self): pass
def _func():
yield None
function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)
try:
raise TypeError()
except TypeError:
_tb = sys.exc_info()[2]
traceback_type = type(_tb)
frame_type = type(_tb.tb_frame)
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from thread import allocate_lock
except ImportError:
try:
from threading import Lock as allocate_lock
except ImportError:
from dummy_thread import allocate_lock
| apache-2.0 | -390,231,943,338,218,100 | 3,896,956,098,602,708,500 | 25.946667 | 77 | 0.641762 | false |
MichaelHills/seizure-detection | common/data.py | 1 | 1254 | import io
import os
import os.path
def makedirs(dir):
try:
os.makedirs(dir)
except:
pass
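# jsdict simply exposes a dict's keys as attributes (record.key instead of
# record['key']); wrap_data() below returns loaded results wrapped this way.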
class jsdict(dict):
def __init__(self, data):
self.__dict__ = data
class CachedDataLoader:
def __init__(self, dir):
self.dir = dir
makedirs(dir)
    # try to load data from filename; if it doesn't exist, run func()
    # and save its result to filename
def load(self, filename, func):
def wrap_data(data):
if isinstance(data, list):
return [jsdict(x) for x in data]
else:
return jsdict(data)
if filename is not None:
filename = os.path.join(self.dir, filename)
data = io.load_hkl_file(filename)
if data is not None:
return wrap_data(data)
data = io.load_pickle_file(filename)
if data is not None:
return wrap_data(data)
data = func()
if filename is not None:
if isinstance(data, dict) and '__use_pickle' not in data.keys():
if io.save_hkl_file(filename, data):
return wrap_data(data)
io.save_pickle_file(filename, data)
return wrap_data(data)
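# Illustrative usage sketch (the cache directory name and the feature-building
# callable are hypothetical, not part of this module):
#
#   loader = CachedDataLoader('data-cache')
#   data = loader.load('patient1-features.pickle', lambda: {'X': X, 'y': y})
#   data.X  # attribute-style access comes from the jsdict wrapper above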
| mit | -2,701,064,746,789,719,600 | -2,702,493,154,254,422,500 | 24.08 | 77 | 0.541467 | false |